Snowshoe Hare in the Upper Midwest

Non-stationary models with physical barriers and non-linear covariates

Set Directory

setwd("~/Michigan_State/Sean")

Load and Prep Observation Data

Michigan

Loading the raw Michigan data, dropping previously acquired environmental covariates, and summing successes across trials.

MI.df = read.csv("./Data_102418/mi_data10-24-18.csv",  
                   header = TRUE, stringsAsFactors = FALSE, sep=",") 

MI.df = MI.df %>%
        mutate(Long = Lat, #these appear reversed in original data
               Lat = Lon,
               State = "Michigan", #transect state
               Spp = "Hare",
               OBS = Hare, #overall presence (at any trial)
               Counts = T.1 + T.2 + T.3 + T.4 + T.5 + T.6 + T.7 + T.8 + T.9, #Sum across trials
               Trials = 9, #No NAs in this dataset; all have 9 trials
               Data = "Fit",
               Site2 = paste("M", 1:nrow(MI.df), sep=".")) %>%
        select(Site2, Long, Lat, State, OBS, Counts, Trials, Data, Spp)

dim(MI.df) #Check data
## [1] 125   9
head(MI.df)
##   Site2      Long      Lat    State OBS Counts Trials Data  Spp
## 1   M.1 -88.24388 47.41681 Michigan   1      6      9  Fit Hare
## 2   M.2 -84.58309 45.53829 Michigan   1      6      9  Fit Hare
## 3   M.3 -84.14892 45.46854 Michigan   1      6      9  Fit Hare
## 4   M.4 -90.07851 46.35078 Michigan   1      1      9  Fit Hare
## 5   M.5 -86.56820 46.37911 Michigan   1      1      9  Fit Hare
## 6   M.6 -86.75846 46.25573 Michigan   1      9      9  Fit Hare

Wisconsin

Loading data as above; because the number of trials varies by transect, successes and trials are summed individually for each transect.

WI.df = read.csv("./Data_102418/wi_data10-24-18.csv",  
                   header = TRUE, stringsAsFactors = FALSE, sep=",")

WI.Trial.df = WI.df[2:21] #pull out trial columns

WI.Trial.df[WI.Trial.df >= 1] = 1 #set track counts to 1

WI.Trial.df$Counts = rowSums(WI.Trial.df, na.rm=T) #sum total successes by transect

WI.Trial.df$Trials = rowSums(is.na(WI.df[2:21])==FALSE) #count number of trials

WI.df = WI.df %>% #Basically the same as MI above
        mutate(Long = Lat, 
               Lat = Lon,
               State = "Wisconsin",
               OBS = Hare,
               Spp = "Hare",
               Data = "Fit",
               Counts = WI.Trial.df$Counts, #Sum across trials
               Trials = WI.Trial.df$Trials, #trials vary by transect
               Site2 = paste("W", 1:nrow(WI.df), sep=".")) %>% #Site identifier
        select(Site2, Long, Lat, State, OBS, Counts, Trials, Data, Spp)

dim(WI.df)
## [1] 195   9
head(WI.df)
##   Site2      Long      Lat     State OBS Counts Trials Data  Spp
## 1   W.1 -90.72816 44.36486 Wisconsin   1      0      8  Fit Hare
## 2   W.2 -90.84052 44.27223 Wisconsin   0      0      6  Fit Hare
## 3   W.3 -90.82058 44.45853 Wisconsin   1      5      8  Fit Hare
## 4   W.4 -92.11831 45.44736 Wisconsin   0      0      6  Fit Hare
## 5   W.5 -91.22783 45.02406 Wisconsin   0      0      6  Fit Hare
## 6   W.6 -90.01462 44.21396 Wisconsin   0      0      8  Fit Hare

Combine & Convert to Points

Fit.df = rbind(MI.df, WI.df)

Fit.pnt = SpatialPointsDataFrame(Fit.df[, c("Long","Lat")], Fit.df) 
proj4string(Fit.pnt) = "+proj=longlat +datum=NAD83 +no_defs +ellps=GRS80 +towgs84=0,0,0"

Get Domain Boundaries

Defining the domain extent, downloading state boundaries, and then creating raster versions for later plotting. Note that the “Lakes” shapefile is used to better identify lake boundaries. A copy of this shapefile is in the folder with this script; the path to the shapefile will need to be updated in this chunk.

World = map("world", 
            fill = TRUE, 
            plot = FALSE)

IDs = sapply(strsplit(World$names, ":"), function(x) x[1])

LL84 = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"

WorldP = map2SpatialPolygons(World, IDs = IDs,
                             proj4string = CRS(projection(LL84)))

#Add a dataframe  
pid = sapply(slot(WorldP, "polygons"), 
             function(x) slot(x, "ID"))

p.df = data.frame( ID=1:length(WorldP), 
                   row.names = pid)

World = SpatialPolygonsDataFrame(WorldP, p.df)
World = spTransform(World, LL84)
World = gBuffer(World, width = 0, byid = F)

#States
States = map("state", 
             fill = TRUE, 
             plot = FALSE)


IDs = sapply(strsplit(States$names, ":"), function(x) x[1])

LL84 = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"

StatesP = map2SpatialPolygons(States, IDs = IDs,
                              proj4string = CRS(projection(LL84)))

#Add a dataframe  
pid = sapply(slot(StatesP, "polygons"), 
             function(x) slot(x, "ID"))

p.df = data.frame( ID=1:length(StatesP), 
                   row.names = pid)

States = SpatialPolygonsDataFrame(StatesP, p.df)
States = spTransform(States, LL84)



Lakes = readOGR(dsn = "C:/Users/humph173/Documents/Michigan_State/Marten/ArcWork/Lakes", 
                layer = "Lake_2ks", 
                stringsAsFactors = FALSE)
## OGR data source with driver: ESRI Shapefile 
## Source: "C:\Users\humph173\Documents\Michigan_State\Marten\ArcWork\Lakes", layer: "Lake_2ks"
## with 1 features
## It has 15 fields
## Integer64 fields read as strings:  OBJECTID Id area InPoly_FID SimPgnFlag
LakesLL = spTransform(Lakes, proj4string(States))


Ext = c(-93.173415, -81.932841, 41.217132, 47.740649)
Domain = crop(States, Ext)
Domain$Name = str_cap_words(rownames(Domain@data))

Water0 = as(extent(Domain), "SpatialPolygons") 
p.df = data.frame(ID=1:length(Water0))

Water0 = SpatialPolygonsDataFrame(Water0, p.df, match.ID = F)
proj4string(Water0) = proj4string(Domain)
Water1 = gDifference(Water0, spTransform(Lakes, proj4string(Water0)))

DomLLU = gUnaryUnion(Domain)

#Rasterized version  
Ras = raster(res = 0.02, ext = extent(DomLLU),
             crs = proj4string(DomLLU))

Domain.r = rasterize(DomLLU, Ras, 
                     field = 0, 
                     background = NA)

#Point grid version
Grd.pnt = rasterToPoints(Domain.r, spatial = TRUE)

Grd.pnt@data = Grd.pnt@data %>%
  mutate(Long = Grd.pnt@coords[,1],
         Lat = Grd.pnt@coords[,2]) %>%
  select(-layer)

Quick Plot to Check Data

Locations used to fit the model.

rng = seq(0, 255, 1)

mCols  = brewer.pal(11, "RdYlBu")[-6] 
cr0 = rev(colorRampPalette((mCols))(n = 256))
cr = colorRampPalette(c("tan", cr0),  
                      bias = 1, space = "rgb")

MyMatrix = matrix(nrow=7, ncol=2)
rownames(MyMatrix) = rownames(coordinates(Domain))
MyMatrix[,1] = c(-89.07686, -86.11260, -91.91021, -83.82014, -91.31258, -83.46125, -89.51171)
MyMatrix[,2] = c(41.80000, 41.36514, 42.28710, 43.0, 47.5041, 41.61285, 43.63285)

Hare.LLpnt = spTransform(Fit.pnt, proj4string(States))

levelplot(Domain.r, 
          margin = FALSE,
          xlab = NULL, 
          ylab = NULL, 
          maxpixels = 1e5,
          col.regions = cr, at = rng,
          colorkey = FALSE, par.strip.text = list(fontface='bold', cex=1.5),
          par.settings = list(axis.line = list(col = "black"),
                              strip.background = list(col = 'transparent'), 
                              strip.border = list(col = 'transparent')),
          scales = list(cex = 1.25)) + 
  latticeExtra::layer(sp.polygons(Domain, col = "black", lwd = 0.5)) +
  latticeExtra::layer(sp.polygons(LakesLL, fill = "lightblue", col = "transparent", lwd = 0.5)) +
  latticeExtra::layer(sp.points(Hare.LLpnt, col = "red", pch = factor(Hare.LLpnt$OBS), cex = 1)) +
  latticeExtra::layer(sp.text(MyMatrix, txt = Domain$Name, 
                              pos =c(1,3,3,2,2,1,1), 
                              col="black",font=list(face="bold"), cex=1)) +
  latticeExtra::layer({SpatialPolygonsRescale(layout.north.arrow(),
                                              offset = c(-83, 45.5),
                                              scale = 2)})

Project Everything to KM

nProj = "+proj=utm +zone=16 +datum=NAD83 +units=km +no_defs +ellps=GRS80 +towgs84=0,0,0"

LakesKM = spTransform(Lakes, nProj)
FocalKM = spTransform(Water1, nProj)
DomP = spTransform(Domain, nProj)
DomPU = gUnaryUnion(DomP)

Fit.pntP = spTransform(Fit.pnt, nProj)

Grd.pntP = spTransform(Grd.pnt, nProj)

Construct Mesh and Define Physical Barriers

These values are in the units of the projected coordinate system (kilometers).

max.edge = 8 #Make the outer edge length 8km
bound.outer = 75 #Outer extension can be 75km

Fit.pntPX = subset(Fit.pntP, Spp == "Hare" & Data == "Fit") #Only training data

bdry = inla.sp2segment(DomPU) #Formatting boundary for r-INLA

mesh = inla.mesh.2d(boundary = bdry, #Boundary
                    loc = Fit.pntPX, #Fit to point locations
                    max.edge = c(1, 5)*max.edge, #mesh size specifications
                    cutoff = 8,
                    min.angle = 25,
                    offset = c(max.edge, bound.outer))
mesh$n #number of nodes
## [1] 5551
plot(mesh, lwd=0.5)

Define Spatial Barriers

Article (Bakka 2019): https://www.sciencedirect.com/science/article/pii/S221167531830099X
See the tutorial: https://haakonbakka.bitbucket.io/btopic107.html

tl = length(mesh$graph$tv[,1])

posTri = matrix(0, tl, 2)

for (t in 1:tl){
  temp = mesh$loc[mesh$graph$tv[t, ], ]
  posTri[t,] = colMeans(temp)[c(1,2)] 
}

posTri = SpatialPoints(posTri)
proj4string(posTri) = proj4string(FocalKM)

normal = over(DomPU, posTri, returnList=T)
normal = unlist(normal)
barrier.triangles = setdiff(1:tl, normal)

poly.barrier = inla.barrier.polygon(mesh, barrier.triangles)
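
As a quick sanity check (a sketch using the objects created above), the number of triangles assigned to the barrier can be compared with the number inside the normal domain.

c(barrier = length(barrier.triangles),
  normal = length(normal),
  total = tl)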

Plot Mesh with Barriers

Red areas are barriers.

plot(mesh, main="Mesh")
plot(poly.barrier, add=T, col='red')
plot(mesh, add=T)

Validation Data

Wisconsin

WIV.df = read.csv("./WI_HareValid2000-18.csv",  
                   header = TRUE, stringsAsFactors = FALSE, sep=",")

WIV.df = WIV.df %>%
         select(X, Y, HARESA_12_, HARESA_13_,  HARESA_14_)


WIV.df[is.na(WIV.df)] = 0

WIV.df = WIV.df %>%
         mutate(Site2 = paste("WIV_", 1:dim(WIV.df)[1]), 
                Long = X,
                Lat = Y,
                Source = "WI",
                Spp = "Valid",
                State = "Wisconsin",
                Data = "Valid",
                Counts = (HARESA_12_ + HARESA_13_ + HARESA_14_),
                OBS = ifelse(Counts >= 1, 1, 0),
                Trials = 1) %>% 
        select(Site2, Long, Lat, State, OBS, Counts, Trials, Data, Spp, Source)

dim(WIV.df)
## [1] 51 10
head(WIV.df)
##    Site2     Long      Lat     State OBS Counts Trials  Data   Spp Source
## 1 WIV_ 1 459786.6 640333.1 Wisconsin   0      0      1 Valid Valid     WI
## 2 WIV_ 2 459119.3 623022.5 Wisconsin   0      0      1 Valid Valid     WI
## 3 WIV_ 3 448003.0 645595.7 Wisconsin   1     20      1 Valid Valid     WI
## 4 WIV_ 4 544112.4 409977.9 Wisconsin   1     40      1 Valid Valid     WI
## 5 WIV_ 5 429676.8 692700.4 Wisconsin   1     16      1 Valid Valid     WI
## 6 WIV_ 6 323853.0 603638.8 Wisconsin   1     58      1 Valid Valid     WI
#NAD_1983_HARN_Wisconsin_TM
WIval.nproj = "+proj=tmerc +lat_0=0 +lon_0=-90 +k=0.9996 +x_0=520000 +y_0=-4480000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"

WIv.pnt = SpatialPointsDataFrame(WIV.df[, c("Long","Lat")], WIV.df) 
proj4string(WIv.pnt) = WIval.nproj

WIv.pnt = spTransform(WIv.pnt, nProj)

Hare (Sault Tribe)

library("readxl")

files = list.files(path="C:/Users/humph173/Documents/Michigan_State/Eric/Pellets", 
                   pattern="*.xlsx", full.names=T, recursive=FALSE)


for(i in 1:length(files)) {
  tmp.ex = as.data.frame(read_excel(files[i]))[,1:2]
  names(tmp.ex) = c("X", "Y")
  
  if(i == 1){Pellet.df = tmp.ex}
      else{Pellet.df = rbind(Pellet.df, tmp.ex)}
}
## New names:
## * `` -> ...3
## New names:
## * `` -> ...3
STV.df = Pellet.df %>%
               mutate(Site2 = paste("STV_", 1:dim(Pellet.df)[1]), 
                      Long = X,
                      Lat = Y,
                      Source = "ST",
                      Spp = "Valid",
                      State = "Michigan",
                      Data = "Valid",
                      Counts = 1,
                      OBS = 1,
                      Trials = 1) %>% 
        select(Site2, Long, Lat, State, OBS, Counts, Trials, Data, Spp, Source)

head(STV.df)
##    Site2     Long     Lat    State OBS Counts Trials  Data   Spp Source
## 1 STV_ 1 649079.4 5120467 Michigan   1      1      1 Valid Valid     ST
## 2 STV_ 2 661239.2 5135238 Michigan   1      1      1 Valid Valid     ST
## 3 STV_ 3 661243.7 5135231 Michigan   1      1      1 Valid Valid     ST
## 4 STV_ 4 661246.0 5135232 Michigan   1      1      1 Valid Valid     ST
## 5 STV_ 5 661253.6 5135214 Michigan   1      1      1 Valid Valid     ST
## 6 STV_ 6 661249.1 5135212 Michigan   1      1      1 Valid Valid     ST
dim(STV.df)
## [1] 847  10
MnProj = "+proj=utm +zone=16 +datum=NAD83 +units=m +no_defs +ellps=GRS80 +towgs84=0,0,0"

STV.pnt = SpatialPointsDataFrame(STV.df[, c("Long","Lat")], STV.df) 
proj4string(STV.pnt) = MnProj

STV.pnt = spTransform(STV.pnt, nProj)

Predator Prey Data

PPV.df = read.csv("C:/Users/humph173/Documents/Michigan_State/Marten/Data/PredPrey/Pred_prey_Comb_121218.csv",  
                   header = TRUE, sep=",") 

PPV.df = PPV.df %>%
          mutate(Site2 = paste("PPV_", 1:dim(PPV.df)[1]), 
                      Long = Easting,
                      Lat = Northing,
                      Source = "PP",
                      Spp = "Valid",
                      State = "Michigan",
                      Data = "Valid",
                      Counts = 1,
                      OBS = 1,
                      Trials = 1) %>% 
        select(Site2, Long, Lat, State, OBS, Counts, Trials, Data, Spp, Source)

head(PPV.df)
##    Site2   Long     Lat    State OBS Counts Trials  Data   Spp Source
## 1 PPV_ 1 399299 5117030 Michigan   1      1      1 Valid Valid     PP
## 2 PPV_ 2 399299 5117030 Michigan   1      1      1 Valid Valid     PP
## 3 PPV_ 3 411292 5117450 Michigan   1      1      1 Valid Valid     PP
## 4 PPV_ 4 400838 5119454 Michigan   1      1      1 Valid Valid     PP
## 5 PPV_ 5 400838 5119454 Michigan   1      1      1 Valid Valid     PP
## 6 PPV_ 6 396858 5127841 Michigan   1      1      1 Valid Valid     PP
dim(PPV.df)
## [1] 25939    10
PPV.pnt = SpatialPointsDataFrame(PPV.df[, c("Long","Lat")], PPV.df) 
proj4string(PPV.pnt) = MnProj

PPV.pnt = spTransform(PPV.pnt, nProj)

Lower Survey (Pellet)

LPS.df = read.csv("./2019_pellet_sites.csv",  
                   header = TRUE, sep=",") 

LPS.df = LPS.df %>%
          mutate(Site2 = paste("PPV_", 1:dim(LPS.df)[1]), 
                      Long = Latitude, #Reversed in data
                      Lat = Longitude,
                      Source = "LPP",
                      Spp = "Valid",
                      State = "Michigan",
                      Data = "Valid",
                      Counts = 1,
                      OBS = CurrentOcc,
                      Trials = 1) %>% 
        filter(plot_ID != "main") %>% #Remove Original Surveys
        select(Site2, Long, Lat, State, OBS, Counts, Trials, Data, Spp, Source)

head(LPS.df)
##    Site2     Long      Lat    State OBS Counts Trials  Data   Spp Source
## 1 PPV_ 1 619035.8 519897.4 Michigan   1      1      1 Valid Valid    LPP
## 2 PPV_ 2 617098.3 528769.7 Michigan   1      1      1 Valid Valid    LPP
## 3 PPV_ 4 620157.4 513569.1 Michigan   1      1      1 Valid Valid    LPP
## 4 PPV_ 5 609769.3 522842.3 Michigan   0      1      1 Valid Valid    LPP
## 5 PPV_ 6 608475.4 518285.8 Michigan   1      1      1 Valid Valid    LPP
## 6 PPV_ 7 613170.1 511702.0 Michigan   0      1      1 Valid Valid    LPP
dim(LPS.df)
## [1] 31 10
#Long & Lat Reversed & not UTM
#NAD_1983_2011_Michigan_GeoRef_Meters
StateGeoref.Proj = "+proj=omerc +lat_0=45.30916666666666 +lonc=-86 +alpha=337.25556 +k=0.9996 +x_0=2546731.496 +y_0=-4354009.816 +no_uoff +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"

LPS.pnt = SpatialPointsDataFrame(LPS.df[, c("Long","Lat")], LPS.df) 
proj4string(LPS.pnt) =StateGeoref.Proj 

LPS.pnt = spTransform(LPS.pnt, nProj)

Update Coordinates and Combine

Fit.pnt.tab = Fit.pntP@data %>%
              mutate(Source = "Fit",
                     Long = Fit.pntP@coords[,1],
                     Lat = Fit.pntP@coords[,2])

WIv.pnt.tab = WIv.pnt@data %>%
              mutate(Long = WIv.pnt@coords[,1],
                     Lat = WIv.pnt@coords[,2])


PPV.pnt.tab = PPV.pnt@data %>%
              mutate(Long = PPV.pnt@coords[,1],
                     Lat = PPV.pnt@coords[,2])

STV.pnt.tab = STV.pnt@data %>%
              mutate(Long = STV.pnt@coords[,1],
                     Lat = STV.pnt@coords[,2])

LPS.pnt.tab = LPS.pnt@data %>%
              mutate(Long = LPS.pnt@coords[,1],
                     Lat = LPS.pnt@coords[,2])

Combine All Data

Join data to common dataframe.

hare.df = rbind(Fit.pnt.tab, WIv.pnt.tab, STV.pnt.tab, PPV.pnt.tab, LPS.pnt.tab)

dim(hare.df)
## [1] 27188    10
hare.df %>%
  group_by(State, Data) %>%
  summarise(Count = length(Data))
## # A tibble: 4 x 3
## # Groups:   State [2]
##   State     Data  Count
##   <chr>     <chr> <int>
## 1 Michigan  Fit     125
## 2 Michigan  Valid 26817
## 3 Wisconsin Fit     195
## 4 Wisconsin Valid    51

Convert to Points

Hare.pnt = SpatialPointsDataFrame(hare.df[, c("Long","Lat")], hare.df) 
proj4string(Hare.pnt) = nProj

Quick Plot to Check Validation Data

Locations used for model validation.

rng = seq(0, 255, 1)

mCols  = brewer.pal(11, "RdYlBu")[-6] 
cr0 = rev(colorRampPalette((mCols))(n = 256))
cr = colorRampPalette(c("tan", cr0),  
                      bias = 1, space = "rgb")

MyMatrix = matrix(nrow=7, ncol=2)
rownames(MyMatrix) = rownames(coordinates(Domain))
MyMatrix[,1] = c(-89.07686, -86.11260, -91.91021, -83.82014, -91.31258, -83.46125, -89.51171)
MyMatrix[,2] = c(41.80000, 41.36514, 42.28710, 43.0, 47.5041, 41.61285, 43.63285)

Hare.LLpnt = spTransform(Hare.pnt, proj4string(States))
Hare.LLpnt = subset(Hare.LLpnt, Data == "Valid")

levelplot(Domain.r, 
          margin = FALSE,
          xlab = NULL, 
          ylab = NULL, 
          maxpixels = 1e5,
          col.regions = cr, at = rng,
          colorkey = FALSE, par.strip.text = list(fontface='bold', cex=1.5),
          par.settings = list(axis.line = list(col = "black"),
                              strip.background = list(col = 'transparent'), 
                              strip.border = list(col = 'transparent')),
          scales = list(cex = 1.25)) + 
  latticeExtra::layer(sp.polygons(Domain, col = "black", lwd = 0.5)) +
  latticeExtra::layer(sp.polygons(LakesLL, fill = "lightblue", col = "transparent", lwd = 0.5)) +
  latticeExtra::layer(sp.points(Hare.LLpnt, col = "red", pch = factor(Hare.LLpnt$Data), cex = 1)) +
  latticeExtra::layer(sp.text(MyMatrix, txt = Domain$Name, 
                              pos =c(1,3,3,2,2,1,1), 
                              col="black",font=list(face="bold"), cex=1)) +
  latticeExtra::layer({SpatialPolygonsRescale(layout.north.arrow(),
                                              offset = c(-83, 45.5),
                                              scale = 2)})

Combine Observations, Mesh Nodes, and Grid Points

#Node coordinates
dd = as.data.frame(cbind(mesh$loc[,1], 
                         mesh$loc[,2]))

names(dd) = c("Long", "Lat") #name coordinates
dd$OBS = 0 #no hare at these locations
dd$Site2 = paste("N", 1:nrow(dd), sep = ".") #to match with observation data
dd$State = "All" 
dd$Spp = "Mesh"
dd$Counts = 0
dd$Trials = 0
dd$Source = "Mesh"
dd$Data = "Fit"
#Hare Obs
hare.set = Hare.pnt@data %>%
           mutate(Long = Hare.pnt@coords[,1],
                  Lat = Hare.pnt@coords[,2])

#Grid
grid.set = Grd.pntP@data %>%
           mutate(Long = Grd.pntP@coords[,1],
                  Lat = Grd.pntP@coords[,2],
                  Spp = "Grid",
                  OBS = 0,
                  State = "All",
                  Counts = 0,
                  Trials = 0,
                  Data = "grid",
                  Source = "Grid",
                  Site2 = paste("G", 1:nrow(Grd.pntP@data), sep = "."))


All.pnts = rbind(hare.set, dd, grid.set)
All.pnts = SpatialPointsDataFrame(All.pnts[, c("Long","Lat")], All.pnts)
proj4string(All.pnts) = nProj

Extract Covariates

Forest1km = raster("C:/Users/humph173/Documents/Michigan_State/Sean/Loop_020819/Forest1km.grd")
mSnow5Yr = raster("./Hare1/Mean5yrSnow.tif")
mxTemp = raster("./Hare1/meanTMAX.tif")

All.pnts$mSnow5yrE = extract(mSnow5Yr, 
                                spTransform(All.pnts,
                                CRS(proj4string(mSnow5Yr))), 
                                method="simple") 

All.pnts$mSnow5yrE[is.na(All.pnts$mSnow5yrE)] = mean(All.pnts$mSnow5yrE, na.rm=T)

All.pnts$mxTempE = extract(mxTemp, 
                                spTransform(All.pnts,
                                CRS(proj4string(mxTemp))), 
                                method="simple") 

All.pnts$mxTempE[is.na(All.pnts$mxTempE)] = mean(All.pnts$mxTempE, na.rm=T)

All.pnts$Forest1kmE = extract(Forest1km, 
                                spTransform(All.pnts,
                                CRS(proj4string(Forest1km))), 
                                method="simple") 

All.pnts$Forest1kmE[is.na(All.pnts$Forest1kmE)] = mean(All.pnts$Forest1kmE, na.rm=T)

All.pnts$Forest1kmE = round(All.pnts$Forest1kmE/100, 1)
 
### 2019 Lower #####################################################
mSnow5Yr2019 = raster("./Mean5yrlp2019.tif")
mxTemp2019 = raster("./meanTMAXLP2019.tif")

All.pnts$mSnow5yrE2 = extract(mSnow5Yr2019, 
                                spTransform(All.pnts,
                                CRS(proj4string(mSnow5Yr2019))), 
                                method="simple") 

All.pnts$mSnow5yrE2[is.na(All.pnts$mSnow5yrE2)] = mean(All.pnts$mSnow5yrE2, na.rm=T)

All.pnts$mxTempE2 = extract(mxTemp2019, 
                                spTransform(All.pnts,
                                CRS(proj4string(mxTemp2019))), 
                                method="simple") 

All.pnts$mxTempE2[is.na(All.pnts$mxTempE2)] = mean(All.pnts$mxTempE2, na.rm=T)

###Select covariate year by source (2019 rasters for the Lower Peninsula survey)
All.pnts$mSnow5yrE = ifelse(All.pnts$Source == "LPP", All.pnts$mSnow5yrE2, All.pnts$mSnow5yrE)

All.pnts$mxTempE = ifelse(All.pnts$Source == "LPP", All.pnts$mxTempE2, All.pnts$mxTempE)

###Clean up frame

All.pnts@data = All.pnts@data %>% select(-c(mSnow5yrE2, mxTempE2))
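
A quick summary of the extracted covariates (a minimal check using the columns created above) can confirm that the NA backfilling worked and that values are on the expected scales.

summary(All.pnts@data[, c("mSnow5yrE", "mxTempE", "Forest1kmE")])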

Identify the UP

Identify locations within the U.P. based on well-defined county boundaries.

UP = readOGR(dsn = "C:/Users/humph173/Documents/Michigan_State/SLP_Beam_Diam/Counties_v17a", 
                layer = "MI_UP", 
                stringsAsFactors = FALSE)
## OGR data source with driver: ESRI Shapefile 
## Source: "C:\Users\humph173\Documents\Michigan_State\SLP_Beam_Diam\Counties_v17a", layer: "MI_UP"
## with 15 features
## It has 15 fields
## Integer64 fields read as strings:  OBJECTID FIPSNUM
XX = readOGR(dsn = "C:/Users/humph173/Documents/Michigan_State/Sean/Hare1/CoordTest", 
                layer = "Test7b", 
                stringsAsFactors = FALSE)
## OGR data source with driver: ESRI Shapefile 
## Source: "C:\Users\humph173\Documents\Michigan_State\Sean\Hare1\CoordTest", layer: "Test7b"
## with 35 features
## It has 13 fields
## Integer64 fields read as strings:  FID_1 Site CurrentOcc Site_ID BUFF_DIST
UPp = gUnaryUnion(spTransform(UP, proj4string(All.pnts)))

All.pnts$UP = is.na(over(All.pnts, UPp)) #TRUE when a point falls outside the U.P.

Update State Designation to Include UP

Cleaning up labels

All.pnts$StateUP = ifelse(All.pnts$UP == FALSE, "Mich.UP", All.pnts$State)
All.pnts$StateUPW = ifelse(All.pnts$StateUP == "Mich.UP", "Wisconsin", All.pnts$StateUP)
All.pnts$Domain = over(All.pnts, DomPU)

levels(factor(All.pnts$StateUP))
## [1] "All"       "Mich.UP"   "Michigan"  "Wisconsin"
levels(factor(All.pnts$StateUPW))
## [1] "All"       "Michigan"  "Wisconsin"

Separate Datasets

Hare.mod = subset(All.pnts, Spp == "Hare") #Observations only

Mesh.mod = subset(All.pnts, Spp == "Mesh" & is.na(Domain) == FALSE) #Mesh locations excluding buffer extension

HareMesh.mod = subset(All.pnts, Spp != "Grid" &  Spp != "Valid") #Observations excluding Grid points for prediction/plotting
HareMesh.mod$ID = 1:nrow(HareMesh.mod@data)

Validation.set = subset(All.pnts, Data == "Valid")


Grd.pnts = subset(All.pnts, Spp == "Grid") #Grid locations for prediction/plotting

Add Background Points to Validation Set

Validation.set2 = spRbind(Validation.set, Mesh.mod)

Validation.set2@data %>%
  group_by(OBS) %>%
  summarise(Cnt =length(OBS))
## # A tibble: 2 x 2
##     OBS   Cnt
##   <dbl> <int>
## 1     0  4739
## 2     1 26857
Validation.set2$dups = duplicated(Validation.set2@data[,c("Long","Lat")])
Validation.set2 = subset(Validation.set2, dups == FALSE)
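
Re-checking the class balance after removing duplicated coordinates (same summary as above):

Validation.set2@data %>%
  group_by(OBS) %>%
  summarise(Cnt = length(OBS))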

Collinearity Check

Condition index values need to be below 30; the largest value here (≈ 5.8) suggests collinearity will not be an issue.

library(perturb)
## 
## Attaching package: 'perturb'
## The following object is masked from 'package:raster':
## 
##     reclassify
Colin.df = HareMesh.mod@data %>% #select covariates
            select(mSnow5yrE, mxTempE, Forest1kmE)

CorCov = cor(Colin.df) #calculate correlation

corrplot(CorCov) #view correlation table

CI = colldiag(CorCov) #Apply metric
CI
## Condition
## Index    Variance Decomposition Proportions
##          intercept mSnow5yrE mxTempE Forest1kmE
## 1  1.000 0.029     0.016     0.172   0.029     
## 2  1.682 0.598     0.001     0.563   0.001     
## 3  5.779 0.374     0.984     0.265   0.970

Define Regions (Three Regions)

Converting each region to an integer value to be used to internally “replicate” estimates by region.

HR.df = HareMesh.mod@data

levels(factor(HR.df$StateUP)) #Level names
## [1] "All"       "Mich.UP"   "Michigan"  "Wisconsin"
HR.df$StateUPWI = as.integer(as.factor(HR.df$StateUP)) #convert to integer
levels(factor(HR.df$StateUPWI)) #levels as integer
## [1] "1" "2" "3" "4"
#Keep U.P. Hare observations as "1", set other regions to "0"
MupSet = HR.df
MupSet$OBS2 = ifelse(MupSet$StateUPWI == "2", MupSet$OBS, 0)

#Keep Lower MI Hare observations as "1", set other regions to "0"
MSet = HR.df
MSet$OBS2 = ifelse(MSet$StateUPWI == "3", MSet$OBS, 0)


#Keep Wisconsin Hare observations as "1", set other regions to "0"
WSet = HR.df
WSet$OBS2 = ifelse(WSet$StateUPWI == "4", WSet$OBS, 0)

MupSet$Rep = 1 #Renumbering from 1-3 instead of 2-4
MSet$Rep = 2
WSet$Rep = 3

HR.df = rbind(MupSet, MSet, WSet) #Join data

HR.df %>% #Count of hare observations by region
  group_by(Rep) %>%
  summarise(Cnt = sum(OBS2))
## # A tibble: 3 x 2
##     Rep   Cnt
##   <dbl> <dbl>
## 1     1    38
## 2     2    38
## 3     3    53

Model Set-up

Spatial Priors and Indices

Relating observations to mesh locations through a projection matrix and defining PC priors for the spatial fields.

#Relate mesh for detection level
locs = cbind(Hare.mod@coords[,1], Hare.mod@coords[,2]) #point locations

A.det = inla.spde.make.A(mesh, #the mesh
                         alpha = 2, #default setting
                         loc=locs) #our locations



#Relate mesh for covariate level
locs = cbind(HR.df[,"Long"], HR.df[,"Lat"])

A.env = inla.spde.make.A(mesh, 
                          alpha = 2,
                          loc=locs)


#Prior for barrier model
barrier.model = inla.barrier.pcmatern(mesh, #mesh
                                      barrier.triangles = barrier.triangles, #Lake boundaries
                                      prior.range = c(1000, 0.5), #P(spatial range < 1000 km) = 0.5
                                      prior.sigma = c(1, 0.01)) #P(field SD > 1) = 0.01

#Same as above, but without barriers
spde = inla.spde2.pcmatern(mesh, 
                           prior.range = c(1000, 0.5), 
                           prior.sigma = c(1, 0.01))


#Create index to track locations of mesh nodes
field.det = inla.spde.make.index("field.det", spde$n.spde) #index for detection level

field.det.c = inla.spde.make.index("field.det.c", spde$n.spde) #copy of above to pass to covariate level

field.env = inla.spde.make.index("field.env", spde$n.spde) #index for covariate level

Separate Set for Base Model

locs = cbind(HareMesh.mod@coords[,1], HareMesh.mod@coords[,2]) 

A.base = inla.spde.make.A(mesh, 
                          alpha = 2,
                          loc=locs)

field.base = inla.spde.make.index("field.base", spde$n.spde)


HR.df2 = HareMesh.mod@data

base.lst = list(c(field.base, 
                  list(intercept3 = 1)), 
                  list(XX = HR.df2[,"Long"])) #Placeholder only

base.stk = inla.stack(data = list(Y = HR.df2$OBS), #Standard model for comparison
                         A = list(A.base, 1), 
                   effects = base.lst,   
                       tag = "base.0")

Organize Data for Fitting

INLA stacks require list objects rather than data frames.

#Detection level
DT.df = Hare.mod@data

DT.lst = list(c(field.det,                            #Spatial index
                list(intercept1 = 1)),                #Intercept
                list(XX = DT.df[,"Lat"],              #List of variables/covariates (placeholder for detection)
                     Site = DT.df[,"Site2"]))         #Site identifier (allows sites to vary independently)

detect.stk = inla.stack(data = list(Y = DT.df$Counts, #Number of successes
                Field.trials = DT.df$Trials),         #trials       
                           A = list(A.det, 1),        #Projection matrix
                     effects = DT.lst,                #Spatial index and covariates
                         tag = "det.0")               #just a name/tag

jdetect.stk = inla.stack(data = list(Y = cbind(DT.df$Counts, NA), #"NA" = space for next model level
                 Field.trials = DT.df$Trials), #as above
                            A = list(A.det, 1), 
                      effects = DT.lst,   
                          tag = "jdet.0")


###Environment/covariate level
HR.lst = list(c(field.env, #index for covariate level
                field.det.c, #copy of field from detection level
                list(intercept2 = 1)), #intercept
                list(mSnw5yr = round(HR.df[,"mSnow5yrE"], 3), #Snow weeks
                     mMxTemp = round(HR.df[,"mxTempE"], 3), #Temperature
                     Forest = HR.df[,"Forest1kmE"], #Forest
                     Region = HR.df[,"Rep"])) #Region identifier

env.stk = inla.stack(data = list(Y = HR.df$OBS, #Standard model for comparison
              Field.trials = rep(1, dim(HR.df)[1])), #1 trial
                         A = list(A.env, 1), 
                   effects = HR.lst,   
                       tag = "env.0")

jenv.stk = inla.stack(data = list(Y = cbind(NA, HR.df$OBS2), #NA = space for detection level
              Field.trials = rep(1, dim(HR.df)[1])),
                         A = list(A.env, 1), 
                   effects = HR.lst,   
                       tag = "jenv.0")


#Combine detection and covariate levels
Joint.stk = inla.stack(jdetect.stk, jenv.stk)

#Save data to run in HPCC
#save(list=c("Joint.stk", "barrier.model", "spde"), file="./HPC/Feb10/Comb_0210.RData") #File for HPCC

Three Regions Model

The joint models are computationally expensive, so they were run on the HPCC.

Site.prior = list(theta=list(prior = "normal", param=c(0, 3))) #Prior for site effect
Reg.prior = list(theta=list(prior = "normal", param=c(0, 3))) #prior for region

JFrm0 = Y ~ -1 + intercept1 + #intercept (detection level)
                 intercept2 + #Intercept (covariate level)
                f(field.det,  #spatial index (detection)
                  model=barrier.model) + #change "barrier.model" to "spde" for no-barrier version.
                f(field.det.c,       #Shared spatial field to account for correlation between model levels
                  copy = "field.det", #copied from detection level of model
                  fixed = FALSE) +
                f(field.env,  #spatial index for covariate level
                  model=barrier.model) +
                f(Site, #Site identifier
                  model="iid", 
                  hyper=Site.prior) + #site prior
               f(mMxTemp,         #temperature
                  model="rw1",   #random walk of order 1
                  replicate = Region, #replicate for each region
                  hyper = Reg.prior) + #prior
               f(mSnw5yr, #weekly snowfall
                  model="rw1",
                  replicate = Region,
                  hyper = Reg.prior) +
               f(Forest, #Forest
                  model="rw1",
                  replicate = Region,
                  hyper = Reg.prior) 
  
  
#thetaJ = JModel.full$internal.summary.hyperpar$mean #from initial run (to speed up model)
thetaJ = c(1.1935975, 3.7979780, -2.1432853, 6.6863289, -0.6710575, -0.1090578, -0.9891725, 0.6587151, 1)

JModel.full = inla(JFrm0, #formula
               data = inla.stack.data(Joint.stk), #data list object
               family = c("binomial","binomial"), #families for detection and covariate levels
               verbose = FALSE, #Show running process
               Ntrials = inla.stack.data(Joint.stk)$Field.trials, #number trials (varies for detection level)
               control.fixed = list(prec = 0.001, #priors for intercept
                          prec.intercept = 0.0001), 
               control.predictor = list(
                               A = inla.stack.A(Joint.stk), #data again
                         compute = TRUE, #estimate fitted values
                            link = 1), #transform fitted values from logit
               control.mode = list(restart = TRUE, theta = thetaJ), #to speed up
               control.inla = list(strategy="gaussian", #to speed up
                                   int.strategy = "eb"),
               control.results = list(return.marginals.random = TRUE, #results to report
                                      return.marginals.predictor = TRUE),
               control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE)) #calculate indices for comparison

Load Results from HPCC

load("~/Michigan_State/Sean/HPC/Feb10/Full_0210_RES.RData")

Model Summary

summary(JModel.full)
## 
## Call:
## c("inla(formula = JFrm0, family = c(\"binomial\", \"binomial\"), data = inla.stack.data(Joint.stk), ",  "    Ntrials = inla.stack.data(Joint.stk)$Field.trials, verbose = TRUE, ",  "    control.compute = list(dic = TRUE, cpo = TRUE, waic = TRUE), ",  "    control.predictor = list(A = inla.stack.A(Joint.stk), compute = TRUE, ",  "        link = 1), control.inla = list(strategy = \"gaussian\", ",  "        int.strategy = \"eb\"), control.results = list(return.marginals.random = TRUE, ",  "        return.marginals.predictor = TRUE), control.fixed = list(prec = 0.001, ",  "        prec.intercept = 1e-04), control.mode = list(restart = TRUE, ",  "        theta = thetaJ), num.threads = 8)")
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          4.4073      16567.9072          6.2731      16578.5876 
## 
## Fixed effects:
##               mean     sd 0.025quant 0.5quant 0.975quant    mode kld
## intercept1 -4.2962 0.5153    -5.3080  -4.2963    -3.2854 -4.2962   0
## intercept2 -7.8112 0.5426    -8.8766  -7.8112    -6.7467 -7.8112   0
## 
## Random effects:
## Name   Model
##  field.det   RGeneric2 
## field.env   RGeneric2 
## Site   IID model 
## mMxTemp   RW1 model 
## mSnw5yr   RW1 model 
## Forest   RW1 model 
## field.det.c   Copy 
## 
## Model hyperparameters:
##                          mean     sd 0.025quant 0.5quant 0.975quant
## Theta1 for field.det   1.1318 0.1286     0.8887   1.1277     1.3942
## Theta2 for field.det   3.9663 0.2198     3.5218   3.9713     4.3876
## Theta1 for field.env  -1.9256 0.9071    -4.0266  -1.7800    -0.5826
## Theta2 for field.env   6.4645 0.6470     5.3985   6.3888     7.9019
## Precision for Site     0.4169 0.1159     0.2327   0.4027     0.6848
## Precision for mMxTemp  0.6397 0.2478     0.2805   0.5985     1.2386
## Precision for mSnw5yr  1.0801 0.4197     0.4977   1.0004     2.1206
## Precision for Forest   1.7988 0.7076     0.7745   1.6816     3.5102
## Beta for field.det.c   0.4968 0.0692     0.3626   0.4961     0.6342
##                          mode
## Theta1 for field.det   1.1128
## Theta2 for field.det   3.9890
## Theta1 for field.env  -1.1864
## Theta2 for field.env   6.1023
## Precision for Site     0.3757
## Precision for mMxTemp  0.5223
## Precision for mSnw5yr  0.8630
## Precision for Forest   1.4636
## Beta for field.det.c   0.4934
## 
## Expected number of effective parameters(std dev): 317.82(0.00)
## Number of equivalent replicates : 56.42 
## 
## Deviance Information Criterion (DIC) ...............: 2049.43
## Deviance Information Criterion (DIC, saturated) ....: NULL
## Effective number of parameters .....................: 459.23
## 
## Watanabe-Akaike information criterion (WAIC) ...: 2285.58
## Effective number of parameters .................: 454.01
## 
## Marginal log-Likelihood:  -35119.18 
## CPO and PIT are computed
## 
## Posterior marginals for linear predictor and fitted values computed
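
The spatial hyperparameters above are reported on INLA's internal (log) scale. Assuming the barrier model's Theta1 and Theta2 correspond to the log standard deviation and log range (in km), as in Bakka's tutorial, a minimal sketch to back-transform them:

hyp = JModel.full$summary.hyperpar
exp(hyp[c("Theta1 for field.det", "Theta2 for field.det"), "mean"]) #approximate field SD and range (km)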

Define Regions (Two Regions)

Treating Wisconsin and the UP as a single/combined region.

HR2.df = HareMesh.mod@data

levels(factor(HR2.df$StateUP)) #Level names
## [1] "All"       "Mich.UP"   "Michigan"  "Wisconsin"
HR2.df$StateUPWI = as.integer(as.factor(HR2.df$StateUP)) #convert to integer
levels(factor(HR2.df$StateUPWI)) #levels as integer
## [1] "1" "2" "3" "4"
#Keep U.P. and WI Hare observations as "1", set Michigan (Lower) locations to "0"
UP_WI_Set = HR2.df
UP_WI_Set$OBS3 = ifelse(UP_WI_Set$StateUPWI == "2" |
                        UP_WI_Set$StateUPWI == "4", UP_WI_Set$OBS, 0)

#Keep Lower MI Hare observations as "1", set other regions to "0"
MI_Set = HR2.df
MI_Set$OBS3 = ifelse(MI_Set$StateUPWI == "3", MI_Set$OBS, 0)


UP_WI_Set$Rep = 1 #Renumbering to 1-2
MI_Set$Rep = 2


HR2.df = rbind(UP_WI_Set, MI_Set) #Join data

HR2.df %>% #Count of hare observations by region
  group_by(Rep) %>%
  summarise(Cnt = sum(OBS3))
## # A tibble: 2 x 2
##     Rep   Cnt
##   <dbl> <dbl>
## 1     1    91
## 2     2    38

Two Regions Model Set-up

As above.

#Relate mesh for detection level
locs = cbind(Hare.mod@coords[,1], Hare.mod@coords[,2]) #point locations

A.det = inla.spde.make.A(mesh, #the mesh
                         alpha = 2, #default setting
                         loc=locs) #our locations



#Relate mesh for covariate level
locs = cbind(HR2.df[,"Long"], HR2.df[,"Lat"])

A.env = inla.spde.make.A(mesh, 
                          alpha = 2,
                          loc=locs)


#Prior for barrier model
barrier.model = inla.barrier.pcmatern(mesh, #mesh
                                      barrier.triangles = barrier.triangles, #Lake boundaries
                                      prior.range = c(1000, 0.5), #P(spatial range < 1000 km) = 0.5
                                      prior.sigma = c(1, 0.01)) #P(field SD > 1) = 0.01

#Same as above, but without barriers
spde = inla.spde2.pcmatern(mesh, 
                           prior.range = c(1000, 0.5), 
                           prior.sigma = c(1, 0.01))


#Create index to track locations of mesh nodes
field.det = inla.spde.make.index("field.det", spde$n.spde) #index for detection level

field.det.c = inla.spde.make.index("field.det.c", spde$n.spde) #copy of above to pass to covariate level

field.env = inla.spde.make.index("field.env", spde$n.spde) #index for covariate level

Organize Two Regions Data for Fitting

#Detection level
DT.df = Hare.mod@data

DT.lst = list(c(field.det,                            #Spatial index
                list(intercept1 = 1)),                #Intercept
                list(XX = DT.df[,"Lat"],              #List of variables/covariates (placeholder for detection)
                     Site = DT.df[,"Site2"]))         #Site identifier (allows sites to vary independently)

jdetect.stk = inla.stack(data = list(Y = cbind(DT.df$Counts, NA), #"NA" = space for next model level
                 Field.trials = DT.df$Trials), #as above
                            A = list(A.det, 1), 
                      effects = DT.lst,   
                          tag = "jdet.0")


###Environment/covariate level
HR.lst = list(c(field.env, #index for covariate level
                field.det.c, #copy of field from detection level
                list(intercept2 = 1)), #intercept
                list(mSnw5yr = round(HR2.df[,"mSnow5yrE"], 3), #Snow weeks
                     mMxTemp = round(HR2.df[,"mxTempE"], 3), #Temperature
                     Forest = HR2.df[,"Forest1kmE"], #Forest
                     Region = HR2.df[,"Rep"])) #Region identifier

jenv.stk = inla.stack(data = list(Y = cbind(NA, HR2.df$OBS3), #NA = space for detection level
              Field.trials = rep(1, dim(HR2.df)[1])),
                         A = list(A.env, 1), 
                   effects = HR.lst,   
                       tag = "jenv.0")


#Combine detection and covariate levels
Joint2.stk = inla.stack(jdetect.stk, jenv.stk)

#Save data to run in HPCC
#save(list=c("Joint2.stk", "barrier.model", "spde"), file="./HPC/Feb25/TwoReg.RData") #File for HPCC
#save(list=c("Joint2.stk", "barrier.model", "spde"), file="./HPC/Mar10/TwoReg2.RData") #File for HPCC

Two Regions Model

Site.prior = list(theta=list(prior = "normal", param=c(0, 3))) #Prior for site effect
Reg.prior = list(theta=list(prior = "normal", param=c(0, 3))) #prior for region

JFrm0 = Y ~ -1 + intercept1 + #intercept (detection level)
                 intercept2 + #Intercept (covariate level)
                f(field.det,  #spatial index (detection)
                  model=barrier.model) + #change "barrier.model" to "spde" for no-barrier version.
                f(field.det.c,       #Shared spatial field to account for correlation between model levels
                  copy = "field.det", #copied from detection level of model
                  fixed = FALSE) +
                f(field.env,  #spatial index for covariate level
                  model=barrier.model) +
                f(Site, #Site identifier
                  model="iid", 
                  hyper=Site.prior) + #site prior
               f(mMxTemp,         #temperature
                  model="rw1",   #random walk of order 1
                  replicate = Region, #replicate for each region
                  hyper = Reg.prior) + #prior
               f(mSnw5yr, #weekly snowfall
                  model="rw1",
                  replicate = Region,
                  hyper = Reg.prior) +
               f(Forest, #Forest
                  model="rw1",
                  replicate = Region,
                  hyper = Reg.prior) 
  
  
#thetaJ = JModel.full$internal.summary.hyperpar$mean #from initial run (to speed up model)
thetaJ = c(1.1935975, 3.7979780, -2.1432853, 6.6863289, -0.6710575, -0.1090578, -0.9891725, 0.6587151, 1)

JModel.2Reg = inla(JFrm0, #formula
                   data = inla.stack.data(Joint2.stk), #data list object
                   family = c("binomial","binomial"), #families for detection and covariate levels
                   verbose = FALSE, #Show running process
                   Ntrials = inla.stack.data(Joint2.stk)$Field.trials, #number trials 
                   control.fixed = list(prec = 0.001, #priors for intercept
                              prec.intercept = 0.0001), 
                   control.predictor = list(
                                   A = inla.stack.A(Joint2.stk), #data again
                             compute = TRUE, #estimate fitted values
                                link = 1), #transform fitted values from logit
                   control.mode = list(restart = TRUE, theta = thetaJ), #to speed up
                   control.inla = list(strategy="gaussian", #to speed up
                                       int.strategy = "eb"),
                   control.results = list(return.marginals.random = TRUE, #results to report
                                          return.marginals.predictor = TRUE),
                    control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE)) #calculate indices for comparison

Two Regions Model Results

Run on the HPCC.

load("~/Michigan_State/Sean/HPC/Mar10/TwoReg_0310_RES.RData")

summary(JModel.2Reg)
## 
## Call:
## c("inla(formula = JFrm0, family = c(\"binomial\", \"binomial\"), data = inla.stack.data(Joint2.stk), ",  "    Ntrials = inla.stack.data(Joint2.stk)$Field.trials, verbose = TRUE, ",  "    control.compute = list(dic = TRUE, cpo = TRUE, waic = TRUE), ",  "    control.predictor = list(A = inla.stack.A(Joint2.stk), compute = TRUE, ",  "        link = 1), control.inla = list(strategy = \"gaussian\", ",  "        int.strategy = \"eb\"), control.results = list(return.marginals.random = TRUE, ",  "        return.marginals.predictor = TRUE), control.fixed = list(prec = 0.001, ",  "        prec.intercept = 1e-04), control.mode = list(restart = TRUE, ",  "        theta = thetaJ), num.threads = 8)")
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          2.6730       5267.9094          3.1363       5273.7187 
## 
## Fixed effects:
##               mean     sd 0.025quant 0.5quant 0.975quant    mode kld
## intercept1 -4.4225 0.5151    -5.4338  -4.4225    -3.4120 -4.4225   0
## intercept2 -7.3257 0.5413    -8.3885  -7.3257    -6.2638 -7.3257   0
## 
## Random effects:
## Name   Model
##  field.det   RGeneric2 
## field.env   RGeneric2 
## Site   IID model 
## mMxTemp   RW1 model 
## mSnw5yr   RW1 model 
## Forest   RW1 model 
## field.det.c   Copy 
## 
## Model hyperparameters:
##                          mean     sd 0.025quant 0.5quant 0.975quant
## Theta1 for field.det   1.1411 0.1254     0.8858   1.1450     1.3775
## Theta2 for field.det   3.9267 0.2088     3.5177   3.9261     4.3394
## Theta1 for field.env  -1.9522 0.9262    -4.0960  -1.8032    -0.5817
## Theta2 for field.env   6.4758 0.6727     5.3566   6.4013     7.9609
## Precision for Site     0.4108 0.1125     0.2303   0.3975     0.6693
## Precision for mMxTemp  0.6715 0.2766     0.2788   0.6232     1.3436
## Precision for mSnw5yr  1.4437 0.6193     0.5902   1.3272     2.9793
## Precision for Forest   1.6529 0.6945     0.6886   1.5242     3.3690
## Beta for field.det.c   0.4868 0.0671     0.3581   0.4853     0.6222
##                          mode
## Theta1 for field.det   1.1590
## Theta2 for field.det   3.9241
## Theta1 for field.env  -1.1955
## Theta2 for field.env   6.1226
## Precision for Site     0.3724
## Precision for mMxTemp  0.5348
## Precision for mSnw5yr  1.1219
## Precision for Forest   1.2964
## Beta for field.det.c   0.4802
## 
## Expected number of effective parameters(std dev): 312.25(0.00)
## Number of equivalent replicates : 38.63 
## 
## Deviance Information Criterion (DIC) ...............: 1980.62
## Deviance Information Criterion (DIC, saturated) ....: NULL
## Effective number of parameters .....................: 446.85
## 
## Watanabe-Akaike information criterion (WAIC) ...: 2311.07
## Effective number of parameters .................: 494.28
## 
## Marginal log-Likelihood:  -23737.73 
## CPO and PIT are computed
## 
## Posterior marginals for linear predictor and fitted values computed

Combined Model

A version of the covariate model without replication by region was also run. Loading results:

load("~/Michigan_State/Sean/HPC/Feb10/Combined_02102018.RData")

Base Model (No Covariates, No Trials)

For Comparison.

JFrmB = Y ~ -1 + intercept3 + #Intercept (covariate level)
                f(field.base,  #spatial index for covariate level
                  model=barrier.model) 
  
  
#thetaJ = JModel.base$internal.summary.hyperpar$mean #from initial run (to speed up model)
thetaJ = c(1.444903, 4.073292)

JModel.base = inla(JFrmB, #formula
               data = inla.stack.data(base.stk), #data list object
               family = "binomial",#family 
               verbose = FALSE, #Show running process
               #Ntrials = inla.stack.data(Joint.stk)$Field.trials, #default is 1
               control.fixed = list(prec = 0.001, #priors for intercept
                          prec.intercept = 0.0001), 
               control.predictor = list(
                               A = inla.stack.A(base.stk), #data again
                         compute = TRUE, #estimate fitted values
                            link = 1), #transform fitted values from logit
               control.mode = list(restart = TRUE, theta = thetaJ), #to speed up
               control.inla = list(strategy="gaussian", #to speed up
                                   int.strategy = "eb"),
               control.results = list(return.marginals.random = TRUE, #results to report
                                      return.marginals.predictor = TRUE),
               control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE)) #calculate indices for comparison
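
With all four fitted objects available (JModel.full, JModel.2Reg, JModel.comb, and JModel.base), the comparison indices requested through control.compute can be tabulated; a minimal sketch:

data.frame(Model = c("Three Regions", "Two Regions", "Combined", "Base"),
           DIC = c(JModel.full$dic$dic, JModel.2Reg$dic$dic,
                   JModel.comb$dic$dic, JModel.base$dic$dic),
           WAIC = c(JModel.full$waic$waic, JModel.2Reg$waic$waic,
                    JModel.comb$waic$waic, JModel.base$waic$waic))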

Results

Maximum Temperature Compare (Three Regions Vs. Combined)

Comb.plt.df = as.data.frame(JModel.full$summary.random$mMxTemp)[,1:6] #Get effect estimates (Full model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     #Clean labels
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 3501) #Add Region ID

Comb.plt.df2 = as.data.frame(JModel.comb$summary.random$mMxTemp)[,1:6] #Get effect estimates (No replicates model)
names(Comb.plt.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df2$Region = "Combined" #ID

Comb.plt.df = rbind(Comb.plt.df, Comb.plt.df2) #Combine for plotting


Plt.df = Comb.plt.df
Plt.df$Region = as.factor(Plt.df$Region)

RAnge = range(Plt.df$ID)

Plt.all = ggplot(Plt.df, aes(ID, Mean, group = Region)) +
            geom_smooth(aes(col = Region, 
                      linetype= Region),
                      method = "loess",
                      span = 0.3,
                      se = FALSE,
                      lwd = 1) +
            scale_linetype_manual(values=c("solid", "longdash", "dotted", "dashed")) +
            scale_colour_manual(values=c("red", "brown", "blue", "black")) +
            geom_hline(yintercept = 0, 
                       linetype = "solid",
                       col = "lightgray",
                       size = 0.5) +  
            #geom_vline(xintercept = 0, 
            #           linetype = "dotted",
            #           col = "red",
            #           size = 0.5) +
            xlim(RAnge[1], RAnge[2]) +
            xlab("Mean Maximum Temperature (°C)") +
            ylab("Occurrence Probability (logit)") +  
            theme_classic() +
            theme(axis.text=element_text(size=16),
                   legend.title = element_text(size=16, face="bold"),
                  legend.key = element_rect(fill = "gray80", linetype=0),
                  legend.background = element_rect(fill = "gray80", linetype=0),
                  strip.text = element_text(face="bold", size = 20),
                  axis.title.y = element_text(face="bold", size = 20),
                  axis.title.x = element_text(face="bold", size = 20, vjust=-2))

Plt.all

Snow Weeks Compare (Three Regions Vs. Combined)

Comb.plt.df = as.data.frame(JModel.full$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 90)

Comb.plt.df2 = as.data.frame(JModel.comb$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")

Comb.plt.df2$Region = "Combined"

Comb.plt.df = rbind(Comb.plt.df, Comb.plt.df2)


Plt.df = Comb.plt.df

Plt.df$Region = as.factor(Plt.df$Region)

RAnge = range(Plt.df$ID)

Plt.all = ggplot(Plt.df, aes(ID, Mean, group = Region)) +
            geom_smooth(aes(col = Region, 
                      linetype= Region),
                      method = "loess",
                      span = 0.3,
                      se = FALSE,
                      lwd = 1) +
            scale_linetype_manual(values=c("solid", "longdash", "dotted", "dashed")) +
            scale_colour_manual(values=c("red", "brown", "blue", "black")) +
            geom_hline(yintercept = 0, 
                       linetype = "solid",
                       col = "lightgray",
                       size = 0.5) +  
            geom_vline(xintercept = 0, 
                       linetype = "dotted",
                       col = "red",
                       size = 0.5) +
            xlim(RAnge[1], RAnge[2]) +
            xlab("Snow Weeks") +
            ylab("Occurrence Probability (logit)") +  
            theme_classic() +
            theme(axis.text=element_text(size=16),
                   legend.title = element_text(size=16, face="bold"),
                  legend.key = element_rect(fill = "gray80", linetype=0),
                  legend.background = element_rect(fill = "gray80", linetype=0),
                  strip.text = element_text(face="bold", size = 20),
                  axis.title.y = element_text(face="bold", size = 20),
                  axis.title.x = element_text(face="bold", size = 20, vjust=-2))

Plt.all

Compare Forest (Three Regions Vs. Combined)

Comb.plt.df = as.data.frame(JModel.full$summary.random$Forest)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 110)

Comb.plt.df2 = as.data.frame(JModel.comb$summary.random$Forest)[,1:6]
names(Comb.plt.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")

Comb.plt.df2$Region = "Combined"

Plt.df = Comb.plt.df

Plt.df$Region = as.factor(Plt.df$Region)

RAnge = range(Plt.df$ID)

Plt.all = ggplot(Plt.df, aes(ID, Mean, group = Region)) +
            geom_smooth(aes(col = Region, 
                      linetype= Region),
                      method = "loess",
                      span = 0.3,
                      se = FALSE,
                      lwd = 1) +
            geom_smooth(data=Comb.plt.df2,
                          aes(ID, Mean, col = Region, 
                      linetype= Region),
                      method = "loess",
                      span = 0.6,
                      se = FALSE,
                      lwd = 1) +
            scale_linetype_manual(values=c("solid", "longdash", "dotted", "dashed")) +
            scale_colour_manual(values=c("red", "brown", "blue", "black")) +
            geom_hline(yintercept = 0, 
                       linetype = "solid",
                       col = "lightgray",
                       size = 0.5) +  
            geom_vline(xintercept = 0, 
                       linetype = "dotted",
                       col = "red",
                       size = 0.5) +
            xlim(RAnge[1], RAnge[2]) +
            xlab("Forest (1km Grid)") +
            ylab("Occurrence Probability (logit)") +  
            theme_classic() +
            theme(axis.text=element_text(size=16),
                   legend.title = element_text(size=16, face="bold"),
                  legend.key = element_rect(fill = "gray80", linetype=0),
                  legend.background = element_rect(fill = "gray80", linetype=0),
                  strip.text = element_text(face="bold", size = 20),
                  axis.title.y = element_text(face="bold", size = 20),
                  axis.title.x = element_text(face="bold", size = 20, vjust=-2))

Plt.all

Maximum Temperature Compare (Two Regions Vs. Combined)

Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$mMxTemp)[,1:6] #Get effect estimates (Full model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     #Clean labels
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 3502) #Add Region ID

Comb.plt.df2 = as.data.frame(JModel.comb$summary.random$mMxTemp)[,1:6] #Get effect estimates (No replicates model)
names(Comb.plt.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df2$Region = "Combined" #ID

Comb.plt.df = rbind(Comb.plt.df, Comb.plt.df2) #Combine for plotting


Plt.df = Comb.plt.df
Plt.df$Region = as.factor(Plt.df$Region)

RAnge = range(Plt.df$ID)

Plt.all = ggplot(Plt.df, aes(ID, Mean, group = Region)) +
            geom_smooth(aes(col = Region, 
                      linetype= Region),
                      method = "loess",
                      span = 0.3,
                      se = FALSE,
                      lwd = 1) +
            scale_linetype_manual(values=c("solid", "longdash", "dotted", "dashed")) +
            scale_colour_manual(values=c("red", "brown", "blue", "black")) +
            geom_hline(yintercept = 0, 
                       linetype = "solid",
                       col = "lightgray",
                       size = 0.5) +  
            #geom_vline(xintercept = 0, 
            #           linetype = "dotted",
            #           col = "red",
            #           size = 0.5) +
            xlim(RAnge[1], RAnge[2]) +
            xlab("Mean Maximum Temperature (°C)") +
            ylab("Occurrence Probability (logit)") +  
            theme_classic() +
            theme(axis.text=element_text(size=16),
                   legend.title = element_text(size=16, face="bold"),
                  legend.key = element_rect(fill = "gray80", linetype=0),
                  legend.background = element_rect(fill = "gray80", linetype=0),
                  strip.text = element_text(face="bold", size = 20),
                  axis.title.y = element_text(face="bold", size = 20),
                  axis.title.x = element_text(face="bold", size = 20, vjust=-2))

Plt.all

Compare Snow Weeks (Two Regions vs. Combined)

Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 90)

Comb.plt.df2 = as.data.frame(JModel.comb$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")

Comb.plt.df2$Region = "Combined"

Comb.plt.df = rbind(Comb.plt.df, Comb.plt.df2)


Plt.df = Comb.plt.df

Plt.df$Region = as.factor(Plt.df$Region)

Range = range(Plt.df$ID)

Plt.all = ggplot(Plt.df, aes(ID, Mean, group = Region)) +
            geom_smooth(aes(col = Region, 
                      linetype= Region),
                      method = "loess",
                      span = 0.3,
                      se = FALSE,
                      lwd = 1) +
            scale_linetype_manual(values=c("solid", "longdash", "dotted", "dashed")) +
            scale_colour_manual(values=c("red", "brown", "blue", "black")) +
            geom_hline(yintercept = 0, 
                       linetype = "solid",
                       col = "lightgray",
                       size = 0.5) +  
            geom_vline(xintercept = 0, 
                       linetype = "dotted",
                       col = "red",
                       size = 0.5) +
            xlim(Range[1], Range[2]) +
            xlab("Snow Weeks") +
            ylab("Occurrence Probability (logit)") +  
            theme_classic() +
            theme(axis.text=element_text(size=16),
                   legend.title = element_text(size=16, face="bold"),
                  legend.key = element_rect(fill = "gray80", linetype=0),
                  legend.background = element_rect(fill = "gray80", linetype=0),
                  strip.text = element_text(face="bold", size = 20),
                  axis.title.y = element_text(face="bold", size = 20),
                  axis.title.x = element_text(face="bold", size = 20, vjust=-2))

Plt.all

Compare Forest (Two Regions vs. Combined)

Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$Forest)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 110)

Comb.plt.df2 = as.data.frame(JModel.comb$summary.random$Forest)[,1:6]
names(Comb.plt.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")

Comb.plt.df2$Region = "Combined"

Plt.df = Comb.plt.df

Plt.df$Region = as.factor(Plt.df$Region)

Range = range(Plt.df$ID)

Plt.all = ggplot(Plt.df, aes(ID, Mean, group = Region)) +
            geom_smooth(aes(col = Region, 
                      linetype= Region),
                      method = "loess",
                      span = 0.3,
                      se = FALSE,
                      lwd = 1) +
            geom_smooth(data=Comb.plt.df2,
                          aes(ID, Mean, col = Region, 
                      linetype= Region),
                      method = "loess",
                      span = 0.6,
                      se = FALSE,
                      lwd = 1) +
            scale_linetype_manual(values=c("solid", "longdash", "dotted", "dashed")) +
            scale_colour_manual(values=c("red", "brown", "blue", "black")) +
            geom_hline(yintercept = 0, 
                       linetype = "solid",
                       col = "lightgray",
                       size = 0.5) +  
            geom_vline(xintercept = 0, 
                       linetype = "dotted",
                       col = "red",
                       size = 0.5) +
            xlim(Range[1], Range[2]) +
            xlab("Forest (1km Grid)") +
            ylab("Occurrence Probability (logit)") +  
            theme_classic() +
            theme(axis.text=element_text(size=16),
                   legend.title = element_text(size=16, face="bold"),
                  legend.key = element_rect(fill = "gray80", linetype=0),
                  legend.background = element_rect(fill = "gray80", linetype=0),
                  strip.text = element_text(face="bold", size = 20),
                  axis.title.y = element_text(face="bold", size = 20),
                  axis.title.x = element_text(face="bold", size = 20, vjust=-2))

Plt.all

Detection Site Effect (Two Regions)

Significant: several site-level detection effects have 95% credible intervals that exclude zero; these are highlighted in the plot below.

PhySig.df2 = as.data.frame(JModel.2Reg$summary.random$Site)

names(PhySig.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975", "mode", "kld")

PhySig.df2$PSigL = ifelse(PhySig.df2$Q025>0 & PhySig.df2$Q975>0, 1, 0) #credible interval entirely above zero
PhySig.df2$PSigH = ifelse(PhySig.df2$Q025<0 & PhySig.df2$Q975<0, 1, 0) #credible interval entirely below zero

PhySig.df3 = PhySig.df2 %>%
              filter(PSigL == 1 | PSigH == 1)
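
A quick tally of the flagged sites (illustrative):

nrow(PhySig.df3) #number of site effects whose 95% credible interval excludes zero
table(ifelse(PhySig.df3$Mean > 0, "Positive", "Negative")) #direction of the flagged effects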

RM.m2 = ggplot(PhySig.df2, aes(x=ID, y=Mean)) + 
                geom_point(size=2, pch=1, col = "gray75") +
                geom_linerange(aes(ymin=Q025, ymax=Q975), colour="gray75") +
                geom_point(data=PhySig.df3, aes(x=ID, y=Mean), 
                           size=2, pch=19, col = "red") +
                geom_linerange(data=PhySig.df3, aes(ymin=Q025, ymax=Q975), colour="black") +
                geom_hline(yintercept = 0, 
                            linetype = "dotted",
                               colour = "red",
                               size = 0.75) +
                    theme_classic() +
                           xlab("Site Location") +
                           ylab("Detection Probabilty (logit)") + 
                           theme(axis.title.y = element_text(face="bold", size=14),
                                 axis.title.x = element_text(face="bold", size=14),
                                 axis.text.y = element_text(face="bold", size=12),
                                 axis.ticks.x=element_blank(),
                                 axis.text.x = element_blank())

RM.m2

Compare Spatial Fields

Pred.pnts = Grd.pnts  #Dense grid created during pre-processing
ModResult = JModel.full  #Three-region model
ModResult2 = JModel.comb #Combined model
ModResult3 = JModel.2Reg #Two-region model
ModResult4 = JModel.base #Base model

pLoc = cbind(Pred.pnts@coords[,1], Pred.pnts@coords[,2]) #coords
Ap = inla.spde.make.A(mesh, loc=pLoc) #Relate to mesh

Pred.pnts$Full = drop(Ap %*% ModResult$summary.random$field.env$mean) 

Pred.pnts$Reg2 = drop(Ap %*% ModResult3$summary.random$field.env$mean) 

Pred.pnts$Comb = drop(Ap %*% ModResult2$summary.random$field.env$mean) 

Pred.pnts$BaseEnv = drop(Ap %*% ModResult4$summary.random$field.base$mean) #Get random field by location
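
The projector matrix Ap maps mesh-node values onto the grid locations, so the same multiplication can carry other posterior summaries. As a sketch (not part of the original workflow), the posterior SD of a field gives a matching uncertainty surface:

Pred.pnts$Full.sd = drop(Ap %*% ModResult$summary.random$field.env$sd) #posterior SD of the three-region field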

#Create rasters
Full.rf.r = rasterize(spTransform(
                      Pred.pnts,
                      CRS(proj4string(Domain.r))), 
                      Domain.r, 
                      "Full", 
                      background = NA)

Reg2.rf.r = rasterize(spTransform(
                      Pred.pnts,
                      CRS(proj4string(Domain.r))), 
                      Domain.r, 
                      "Reg2", 
                      background = NA)

Comb.rf.r = rasterize(spTransform(
                      Pred.pnts,
                      CRS(proj4string(Domain.r))), 
                      Domain.r, 
                      "Comb", 
                      background = NA)

Base.rf.r = rasterize(spTransform(
                      Pred.pnts,
                      CRS(proj4string(Domain.r))), 
                      Domain.r, 
                      "BaseEnv", 
                      background = NA)



RF.stk = stack(Base.rf.r, Comb.rf.r, Reg2.rf.r, Full.rf.r)
names(RF.stk) = c("Base", "Combined", "Reg2", "Reg3")
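
The plotting breaks below are hard-coded to span all four fields; as a sketch, they could instead be derived from the stack itself:

rng.lims = c(min(cellStats(RF.stk, "min")), max(cellStats(RF.stk, "max"))) #layer-wise extremes
#rng = seq(floor(rng.lims[1]*10)/10, ceiling(rng.lims[2]*10)/10, 0.1) #data-driven alternative to the fixed breaks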


rng = seq(-1.4, 5.7, 0.1)

mCols = brewer.pal(11, "RdYlGn")
cr = rev(colorRampPalette(mCols)(n = 500))

cr = colorRampPalette(cr,  
          bias = 2.4, space = "rgb")

levelplot(RF.stk, 
          layout = c(2,2),
          margin = FALSE,
          xlab = NULL, 
          ylab = NULL, 
          names.attr = c("Base", "Combined", "Two Regions", "Three Regions"),
          maxpixels = 1e5,
          col.regions = cr, at = rng,
          colorkey = list(labels=list(cex=1.5),
                          space = "bottom"),
          par.strip.text = list(fontface='bold', cex=1.5),
          par.settings = list(axis.line = list(col = "black"),
                              strip.background = list(col = 'transparent'), 
                              strip.border = list(col = 'transparent')),
          scales = list(cex = 1.25)) + 
  latticeExtra::layer(sp.polygons(Domain, col = "black", lwd = 0.5)) +
  #latticeExtra::layer(sp.polygons(Hare.pnt , col = "black", pch=2, cex = 0.25)) +
  #latticeExtra::layer(sp.text(MyMatrix, txt = Domain$Name, 
  #                            pos =c(1,3,3,2,2,1,1), 
  #                            col="black",font=list(face="bold"), cex=1)) +
  latticeExtra::layer({SpatialPolygonsRescale(layout.north.arrow(),
                                              offset = c(-83, 45.5),
                                              scale = 2)})

Get Predictions

Three Region Model

Based on covariate effects only; the spatial field is extracted below for reference but is not included in the prediction.
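
The covariate effects are transferred to prediction locations with a nearest-knot lookup: for each covariate value, find the closest ID in the effect summary and take the posterior mean at that knot. The sapply()/match() pattern repeated below can be wrapped in a small helper; this is a sketch (lookup_effect is hypothetical, not used by the original code):

lookup_effect = function(x, lu){ #x: covariate values; lu: effect summary with ID and Mean columns
  idx = sapply(x, function(v) which.min(abs(v - lu$ID))) #nearest knot index
  lu$Mean[idx] #posterior mean at that knot
}
#e.g., the UP temperature lookup below becomes:
#Pred.pntsN$UP.TmpEst = lookup_effect(Pred.pntsN$mxTempE, UP.lu)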

ModResult = JModel.full

#Update Region ID for prediction locations (MI and WI only)
MI_WI_Domain = subset(Domain, Name == "Michigan" | Name == "Wisconsin")

Pred.pnts = Grd.pnts

MI_WI_DomainP = spTransform(MI_WI_Domain, proj4string(Pred.pnts))

Pred.pnts$nDom = over(Pred.pnts, MI_WI_DomainP)[,1]

Pred.pntsN = subset(Pred.pnts, is.na(nDom) == FALSE)

Pred.pntsN$Region = ifelse(Pred.pntsN@data$nDom == 21 & Pred.pntsN@data$UP == "FALSE", "Upper", #the UP flag is inverted: "FALSE" indicates the Upper Peninsula
                        ifelse(Pred.pntsN@data$nDom == 21 & Pred.pntsN@data$UP == "TRUE", "Lower",
                           ifelse(Pred.pntsN@data$nDom == 48, "Wisconsin", NA)))


#Get RF
pLoc = cbind(Pred.pntsN@coords[,1], Pred.pntsN@coords[,2])
Ap = inla.spde.make.A(mesh, loc=pLoc)

Pred.pntsN$Full.rf = drop(Ap %*% ModResult$summary.random$field.env$mean) 


#Get Temperature
Comb.plt.df = as.data.frame(JModel.full$summary.random$mMxTemp)[,1:6] #Get effect estimates (Full model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     #Clean labels
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 3501) 

#Region-specific estimates
UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")

UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

#Look up UP Temp Estimates
Pred.pntsN$UP.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - UP.lu$ID)))
Pred.pntsN$UP.TmpEst = as.numeric(with(UP.lu,
                              Mean[match(Pred.pntsN$UP.temp, 
                                                       UP.POS)]))

#Look up Lower Temp Estimates
Pred.pntsN$LW.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Pred.pntsN$LW.temp, 
                                                       LW.POS)]))


#Look up WI Temp Estimates
Pred.pntsN$WI.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Pred.pntsN$WI.temp, 
                                                       WI.POS)]))

#Match to region
Pred.pntsN$Temp.Full = ifelse(Pred.pntsN$Region == "Upper", Pred.pntsN$UP.TmpEst,
                            ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,  
                               ifelse(Pred.pntsN$Region == "Wisconsin", Pred.pntsN$WI.TmpEst, NA)))




#Get Snow Week (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.full$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 90) 

UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")

UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

Pred.pntsN$UP.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - UP.lu$ID)))
Pred.pntsN$UP.TmpEst = as.numeric(with(UP.lu,
                              Mean[match(Pred.pntsN$UP.temp, 
                                                       UP.POS)]))

Pred.pntsN$LW.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Pred.pntsN$LW.temp, 
                                                       LW.POS)]))


Pred.pntsN$WI.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Pred.pntsN$WI.temp, 
                                                       WI.POS)]))

#Match to region
Pred.pntsN$Snow.Full = ifelse(Pred.pntsN$Region == "Upper", Pred.pntsN$UP.TmpEst,
                            ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,  
                               ifelse(Pred.pntsN$Region == "Wisconsin", Pred.pntsN$WI.TmpEst, NA)))



#Get Forest (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.full$summary.random$Forest)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 110)

UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")

UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

Pred.pntsN$UP.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - UP.lu$ID)))
Pred.pntsN$UP.TmpEst = as.numeric(with(UP.lu,
                              Mean[match(Pred.pntsN$UP.temp, 
                                                       UP.POS)]))

Pred.pntsN$LW.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Pred.pntsN$LW.temp, 
                                                       LW.POS)]))


Pred.pntsN$WI.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Pred.pntsN$WI.temp, 
                                                       WI.POS)]))

#Match to region
Pred.pntsN$Forest.Full = ifelse(Pred.pntsN$Region == "Upper", Pred.pntsN$UP.TmpEst,
                            ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,  
                               ifelse(Pred.pntsN$Region == "Wisconsin", Pred.pntsN$WI.TmpEst, NA)))


Pred.pntsN@data = Pred.pntsN@data %>%
                       mutate(Pred.Full = plogis(Temp.Full + Snow.Full + Forest.Full))
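
plogis() is the inverse-logit, p = 1/(1 + exp(-eta)), so the summed effects on the logit scale become occurrence probabilities. A quick sanity check:

plogis(c(-2, 0, 2)) #approximately 0.12, 0.50, 0.88; a linear predictor of 0 maps to p = 0.5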



Full.pred.r = rasterize(spTransform(
                      Pred.pntsN,
                      CRS(proj4string(Domain.r))), 
                      Domain.r, 
                      "Pred.Full", 
                      background = NA)

Combined Model Predictions

ModResult = JModel.comb

Pred.pnts = Pred.pntsN

#Get RF
pLoc = cbind(Pred.pntsN@coords[,1], Pred.pntsN@coords[,2])
Ap = inla.spde.make.A(mesh, loc=pLoc)

Pred.pntsN$Comb.rf = drop(Ap %*% ModResult$summary.random$field.env$mean) 


#Get Temperature
Comb.plt.df = as.data.frame(JModel.comb$summary.random$mMxTemp)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     

#Look up combined temperature estimates
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Pred.pntsN$UP.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Pred.pntsN$cmb.TmpEst = as.numeric(with(Comb.plt.df,
                              Mean[match(Pred.pntsN$UP.temp, 
                                                       Comb.POS)]))

#Get Snow
Comb.plt.df = as.data.frame(JModel.comb$summary.random$mSnw5yr)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     

#Look up combined snow estimates
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Pred.pntsN$UP.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Pred.pntsN$Cmb.SnwEst = as.numeric(with(Comb.plt.df,
                              Mean[match(Pred.pntsN$UP.temp, 
                                                       Comb.POS)]))


#Get Forest
Comb.plt.df = as.data.frame(JModel.comb$summary.random$Forest)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     

#Look up combined forest estimates
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Pred.pntsN$UP.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Pred.pntsN$Cmb.ForEst = as.numeric(with(Comb.plt.df,
                              Mean[match(Pred.pntsN$UP.temp, 
                                                       Comb.POS)]))

Pred.pntsN@data = Pred.pntsN@data %>%
                       mutate(Pred.Comb = plogis(cmb.TmpEst + Cmb.SnwEst + Cmb.ForEst))



Comb.pred.r = rasterize(spTransform(
                      Pred.pntsN,
                      CRS(proj4string(Domain.r))), 
                      Domain.r, 
                      "Pred.Comb", 
                      background = NA)

Two Region Model

Based on covariate effects only, as in the three-region workflow above.

ModResult = JModel.2Reg

#Update Region ID for prediction locations (MI and WI only)
MI_WI_Domain = subset(Domain, Name == "Michigan" | Name == "Wisconsin")

Pred.pnts = Grd.pnts

MI_WI_DomainP = spTransform(MI_WI_Domain, proj4string(Pred.pnts))

Pred.pnts$nDom = over(Pred.pnts, MI_WI_DomainP)[,1]

Pred.pntsN = subset(Pred.pnts, is.na(nDom) == FALSE)

Pred.pntsN$Region = ifelse(Pred.pntsN@data$nDom == 21 & Pred.pntsN@data$UP == "FALSE", "Upper", #the UP flag is inverted: "FALSE" indicates the Upper Peninsula
                        ifelse(Pred.pntsN@data$nDom == 21 & Pred.pntsN@data$UP == "TRUE", "Lower",
                           ifelse(Pred.pntsN@data$nDom == 48, "Wisconsin", NA)))


Pred.pntsN$Region = ifelse(Pred.pntsN$Region == "Upper" | Pred.pntsN$Region == "Wisconsin", "Wisconsin + UP", Pred.pntsN$Region)

#Get RF
pLoc = cbind(Pred.pntsN@coords[,1], Pred.pntsN@coords[,2])
Ap = inla.spde.make.A(mesh, loc=pLoc)

Pred.pntsN$Reg2.rf = drop(Ap %*% ModResult$summary.random$field.env$mean) 


#Get Temperature
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$mMxTemp)[,1:6] #Get effect estimates (two-region model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     #Clean labels
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 3502) 

#Region-specific estimates
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin + Upper")

LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

#Look up Lower Temp Estimates
Pred.pntsN$LW.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Pred.pntsN$LW.temp, 
                                                       LW.POS)]))


#Look up WI + UP Temp Estimates
Pred.pntsN$WI.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Pred.pntsN$WI.temp, 
                                                       WI.POS)]))

#Match to region
Pred.pntsN$Temp.Full = ifelse(Pred.pntsN$Region == "Wisconsin + UP", Pred.pntsN$WI.TmpEst,
                            ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst, NA))




#Get Snow Week (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 90) 

LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin + Upper")

LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

Pred.pntsN$LW.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Pred.pntsN$LW.temp, 
                                                       LW.POS)]))


Pred.pntsN$WI.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Pred.pntsN$WI.temp, 
                                                       WI.POS)]))

#Match to region
Pred.pntsN$Snow.Full = ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,  
                               ifelse(Pred.pntsN$Region == "Wisconsin + UP", Pred.pntsN$WI.TmpEst, NA))



#Get Forest (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$Forest)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 110)

LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin + Upper")

LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

Pred.pntsN$LW.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Pred.pntsN$LW.temp, 
                                                       LW.POS)]))


Pred.pntsN$WI.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Pred.pntsN$WI.temp, 
                                                       WI.POS)]))

#Match to region
Pred.pntsN$Forest.Full = ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,  
                               ifelse(Pred.pntsN$Region == "Wisconsin + UP", Pred.pntsN$WI.TmpEst, NA))


Pred.pntsN@data = Pred.pntsN@data %>%
                       mutate(Pred.Full = plogis(Temp.Full + Snow.Full + Forest.Full))



Reg2.pred.r = rasterize(spTransform(
                      Pred.pntsN,
                      CRS(proj4string(Domain.r))), 
                      Domain.r, 
                      "Pred.Full", 
                      background = NA)

Compare Predictions

Pred.stk = stack(Comb.pred.r, Reg2.pred.r, Full.pred.r)
names(Pred.stk) = c("Combined", "Reg2", "Reg3")

rng = seq(0, 1, 0.01)

mCols = brewer.pal(9, "YlOrBr")
cr = colorRampPalette(mCols)(n = 500)

cr = colorRampPalette(cr,  
          bias = 1, space = "rgb")

levelplot(Pred.stk, #Trial.comp,
          layout = c(3,1),
          margin = FALSE,
          xlab = NULL, 
          ylab = NULL, 
          #main = "Occurrence Probabilty",
          names.attr= c("Combined", "Two Region", "Three Region"),
          maxpixels = 1e5,
          col.regions = cr, at = rng,
          colorkey = list(labels=list(cex=1.5),
                          space = "bottom"),
          par.strip.text = list(fontface='bold', cex=1.5),
          par.settings = list(axis.line = list(col = "black"),
                              strip.background = list(col = 'transparent'), 
                              strip.border = list(col = 'transparent')),
          scales = list(cex = 1.25)) + 
  latticeExtra::layer(sp.polygons(Domain, col = "black", lwd = 1)) +
  #latticeExtra::layer(sp.polygons(Hare.pnt , col = "black", pch=2, cex = 0.25)) +
  latticeExtra::layer({SpatialPolygonsRescale(layout.north.arrow(),
                                              offset = c(-83, 45.5),
                                              scale = 2)})

Compare Model Metrics

Prediction for Sample Locations

#Obs.set = subset(HareMesh.mod, OBS == 1) #superseded: the validation set is used instead

Obs.set = Validation.set2

#Obs.set = subset(Obs.set, OBS == 1)

#Update Region ID for prediction locations (MI and WI only)
MI_WI_Domain = subset(Domain, Name == "Michigan" | Name == "Wisconsin")

Obs.set$nDom = over(Obs.set, MI_WI_DomainP)[,1] #MI_WI_DomainP was projected in an earlier chunk

Obs.set = subset(Obs.set, is.na(nDom) == FALSE)

Obs.set$Region = ifelse(Obs.set@data$nDom == 21 & Obs.set@data$UP == "FALSE", "Upper", #the UP flag is inverted: "FALSE" indicates the Upper Peninsula
                        ifelse(Obs.set@data$nDom == 21 & Obs.set@data$UP == "TRUE", "Lower",
                           ifelse(Obs.set@data$nDom == 48, "Wisconsin", NA)))


Obs.set = Obs.set@data

dim(Obs.set)
## [1] 4055   20
sum(Obs.set$OBS)
## [1] 1026
Obs.set %>%
  group_by(Region) %>%
  summarise(Count = length(Region))
## # A tibble: 3 x 2
##   Region    Count
##   <chr>     <int>
## 1 Lower      1153
## 2 Upper      1364
## 3 Wisconsin  1538
#Get Temperature
Comb.plt.df = as.data.frame(JModel.full$summary.random$mMxTemp)[,1:6] #Get effect estimates (Full model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     #Clean labels
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 3501) 

#Region-specific estimates
UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")

UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

#Look up UP Temp Estimates
Obs.set$UP.temp = sapply(Obs.set$mxTempE, function(x)which.min(abs(x - UP.lu$ID)))
Obs.set$UP.TmpEst = as.numeric(with(UP.lu,
                              Mean[match(Obs.set$UP.temp, 
                                                       UP.POS)]))

#Look up Lower Temp Estimates
Obs.set$LW.temp = sapply(Obs.set$mxTempE, function(x)which.min(abs(x - LW.lu$ID)))
Obs.set$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Obs.set$LW.temp, 
                                                       LW.POS)]))


#Look up WI Temp Estimates
Obs.set$WI.temp = sapply(Obs.set$mxTempE, function(x)which.min(abs(x - WI.lu$ID)))
Obs.set$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Obs.set$WI.temp, 
                                                       WI.POS)]))

#Match to region
Obs.set$Temp.Full = ifelse(Obs.set$Region == "Upper", Obs.set$UP.TmpEst,
                            ifelse(Obs.set$Region == "Lower", Obs.set$LW.TmpEst,  
                               ifelse(Obs.set$Region == "Wisconsin", Obs.set$WI.TmpEst, NA)))




#Get Snow Week (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.full$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 90) 

UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")

UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

Obs.set$UP.temp = sapply(Obs.set$mSnow5yrE, function(x)which.min(abs(x - UP.lu$ID)))
Obs.set$UP.TmpEst = as.numeric(with(UP.lu,
                              Mean[match(Obs.set$UP.temp, 
                                                       UP.POS)]))

Obs.set$LW.temp = sapply(Obs.set$mSnow5yrE, function(x)which.min(abs(x - LW.lu$ID)))
Obs.set$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Obs.set$LW.temp, 
                                                       LW.POS)]))


Obs.set$WI.temp = sapply(Obs.set$mSnow5yrE, function(x)which.min(abs(x - WI.lu$ID)))
Obs.set$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Obs.set$WI.temp, 
                                                       WI.POS)]))

#Match to region
Obs.set$Snow.Full = ifelse(Obs.set$Region == "Upper", Obs.set$UP.TmpEst,
                            ifelse(Obs.set$Region == "Lower", Obs.set$LW.TmpEst,  
                               ifelse(Obs.set$Region == "Wisconsin", Obs.set$WI.TmpEst, NA)))



#Get Forest (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.full$summary.random$Forest)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 110)

UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")

UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

Obs.set$UP.temp = sapply(Obs.set$Forest1kmE, function(x)which.min(abs(x - UP.lu$ID)))
Obs.set$UP.TmpEst = as.numeric(with(UP.lu,
                              Mean[match(Obs.set$UP.temp, 
                                                       UP.POS)]))

Obs.set$LW.temp = sapply(Obs.set$Forest1kmE, function(x)which.min(abs(x - LW.lu$ID)))
Obs.set$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Obs.set$LW.temp, 
                                                       LW.POS)]))


Obs.set$WI.temp = sapply(Obs.set$Forest1kmE, function(x)which.min(abs(x - WI.lu$ID)))
Obs.set$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Obs.set$WI.temp, 
                                                       WI.POS)]))

#Match to region
Obs.set$Forest.Full = ifelse(Obs.set$Region == "Upper", Obs.set$UP.TmpEst,
                            ifelse(Obs.set$Region == "Lower", Obs.set$LW.TmpEst,  
                               ifelse(Obs.set$Region == "Wisconsin", Obs.set$WI.TmpEst, NA)))


Obs.set = Obs.set %>%
          mutate(Pred.Full = plogis(Temp.Full + Snow.Full + Forest.Full))

Obs.set.R3.keep = Obs.set
#Obs.set$Prop = Obs.set$Counts/Obs.set$Trials

#Full.BS = mean((Obs.set$Pred.Full - Obs.set$Prop)^2)
#Full.BS

Full.PA = as.data.frame(cbind(1:dim(Obs.set)[1], Obs.set$OBS, Obs.set$Pred.Full))
names(Full.PA) = c("ID", "OBS", "Pred")

Thresh = optimal.thresholds(Full.PA, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
##         Method Pred
## 1 MaxSens+Spec 0.94
## 2 PredPrev=Obs 0.98
Full.PA.out = presence.absence.accuracy(Full.PA, threshold = Thresh[1,2])

#Calculate TSS
Full.PA.out$TSS = Full.PA.out$sensitivity + Full.PA.out$specificity - 1
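
TSS (the true skill statistic) is sensitivity + specificity - 1, computed here at the MaxSens+Spec threshold. As an optional check (a sketch; assumes the PresenceAbsence package already loaded for optimal.thresholds()), the confusion matrix behind these metrics can be inspected directly:

cmx(Full.PA, threshold = Thresh[1,2]) #confusion matrix at the selected threshold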




#Combined Model
#Get Temperature
Comb.plt.df = as.data.frame(JModel.comb$summary.random$mMxTemp)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     

#Look up combined temperature estimates
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Obs.set$UP.temp = sapply(Obs.set$mxTempE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Obs.set$cmb.TmpEst = as.numeric(with(Comb.plt.df,
                              Mean[match(Obs.set$UP.temp, 
                                                       Comb.POS)]))

#Get Snow
Comb.plt.df = as.data.frame(JModel.comb$summary.random$mSnw5yr)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     

#Look up combined snow estimates
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Obs.set$UP.temp = sapply(Obs.set$mSnow5yrE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Obs.set$Cmb.SnwEst = as.numeric(with(Comb.plt.df,
                              Mean[match(Obs.set$UP.temp, 
                                                       Comb.POS)]))


#Get Forest
Comb.plt.df = as.data.frame(JModel.comb$summary.random$Forest)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     

#Look up combined forest estimates
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Obs.set$UP.temp = sapply(Obs.set$Forest1kmE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Obs.set$Cmb.ForEst = as.numeric(with(Comb.plt.df,
                              Mean[match(Obs.set$UP.temp, 
                                                       Comb.POS)]))


Obs.set = Obs.set %>%
          mutate(Pred.Comb = plogis(cmb.TmpEst + Cmb.SnwEst + Cmb.ForEst))

Obs.set.comb.keep = Obs.set

#Comb.BS = mean((Obs.set$Pred.Comb - Obs.set$Prop)^2, na.rm=T)
#Comb.BS

Comb.PA = as.data.frame(cbind((1:dim(Obs.set)[1]), Obs.set$OBS, Obs.set$Pred.Comb))
names(Comb.PA) = c("ID", "OBS", "Pred")

Thresh2 = optimal.thresholds(Comb.PA, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh2
##         Method Pred
## 1 MaxSens+Spec 0.97
## 2 PredPrev=Obs 0.98
Comb.PA.out = presence.absence.accuracy(Comb.PA, threshold = Thresh2[1,2])

#Calculate TSS
Comb.PA.out$TSS = Comb.PA.out$sensitivity + Comb.PA.out$specificity - 1


################################################################################################################
##Two Region
ModResult = JModel.2Reg
Obs.set = Validation.set2

#Update Region ID for prediction locations (MI and WI only)
MI_WI_Domain = subset(Domain, Name == "Michigan" | Name == "Wisconsin")

MI_WI_DomainP = spTransform(MI_WI_Domain, proj4string(Obs.set))

Obs.set$nDom = over(Obs.set, MI_WI_DomainP)[,1]

Obs.setN = subset(Obs.set, is.na(nDom) == FALSE)

Obs.setN$Region = ifelse(Obs.setN@data$nDom == 21 & Obs.setN@data$UP == "FALSE", "Upper", #the UP flag is inverted: "FALSE" indicates the Upper Peninsula
                        ifelse(Obs.setN@data$nDom == 21 & Obs.setN@data$UP == "TRUE", "Lower",
                           ifelse(Obs.setN@data$nDom == 48, "Wisconsin", NA)))


Obs.setN$Region = ifelse(Obs.setN$Region == "Upper" | Obs.setN$Region == "Wisconsin", "Wisconsin + UP", Obs.setN$Region)

#Get RF
pLoc = cbind(Obs.setN@coords[,1], Obs.setN@coords[,2])
Ap = inla.spde.make.A(mesh, loc=pLoc)

Obs.setN$Reg2.rf = drop(Ap %*% ModResult$summary.random$field.env$mean) 


#Get Temperature
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$mMxTemp)[,1:6] #Get effect estimates (two-region model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     #Clean labels
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 3502) 

#Region-specific estimates
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin + Upper")

LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

#Look up Lower Temp Estimates
Obs.setN$LW.temp = sapply(Obs.setN$mxTempE, function(x)which.min(abs(x - LW.lu$ID)))
Obs.setN$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Obs.setN$LW.temp, 
                                                       LW.POS)]))


#Look up WI + UP Temp Estimates
Obs.setN$WI.temp = sapply(Obs.setN$mxTempE, function(x)which.min(abs(x - WI.lu$ID)))
Obs.setN$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Obs.setN$WI.temp, 
                                                       WI.POS)]))

#Match to region
Obs.setN$Temp.Full = ifelse(Obs.setN$Region == "Wisconsin + UP", Obs.setN$WI.TmpEst,
                            ifelse(Obs.setN$Region == "Lower", Obs.setN$LW.TmpEst, NA))




#Get Snow Week (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 90) 

LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin + Upper")

LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

Obs.setN$LW.temp = sapply(Obs.setN$mSnow5yrE, function(x)which.min(abs(x - LW.lu$ID)))
Obs.setN$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Obs.setN$LW.temp, 
                                                       LW.POS)]))


Obs.setN$WI.temp = sapply(Obs.setN$mSnow5yrE, function(x)which.min(abs(x - WI.lu$ID)))
Obs.setN$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Obs.setN$WI.temp, 
                                                       WI.POS)]))

#Match to region
Obs.setN$Snow.Full = ifelse(Obs.setN$Region == "Lower", Obs.setN$LW.TmpEst,  
                               ifelse(Obs.setN$Region == "Wisconsin + UP", Obs.setN$WI.TmpEst, NA))



#Get Forest (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$Forest)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 110)

LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin + Upper")

LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

Obs.setN$LW.temp = sapply(Obs.setN$Forest1kmE, function(x)which.min(abs(x - LW.lu$ID)))
Obs.setN$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Obs.setN$LW.temp, 
                                                       LW.POS)]))


Obs.setN$WI.temp = sapply(Obs.setN$Forest1kmE, function(x)which.min(abs(x - WI.lu$ID)))
Obs.setN$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Obs.setN$WI.temp, 
                                                       WI.POS)]))

#Match to region
Obs.setN$Forest.Full = ifelse(Obs.setN$Region == "Lower", Obs.setN$LW.TmpEst,  
                               ifelse(Obs.setN$Region == "Wisconsin + UP", Obs.setN$WI.TmpEst, NA))


Obs.setN@data = Obs.setN@data %>%
                       mutate(Pred.Full = plogis(Temp.Full + Snow.Full + Forest.Full))


Obs.set.R2.keep = Obs.setN

Reg2.PA = as.data.frame(cbind(1:dim(Obs.setN)[1], Obs.setN$OBS, Obs.setN$Pred.Full))
names(Reg2.PA) = c("ID", "OBS", "Pred")

Thresh = optimal.thresholds(Reg2.PA, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
##         Method Pred
## 1 MaxSens+Spec 0.95
## 2 PredPrev=Obs 0.97
Reg2.PA.out = presence.absence.accuracy(Reg2.PA, threshold = Thresh[1,2])

#Calculate TSS
Reg2.PA.out$TSS = Reg2.PA.out$sensitivity + Reg2.PA.out$specificity - 1


Comb.PA.out
##   model threshold       PCC sensitivity specificity     Kappa       AUC
## 1  Pred      0.97 0.7810111   0.9269006   0.7315946 0.5319769 0.8581566
##        PCC.sd sensitivity.sd specificity.sd   Kappa.sd      AUC.sd
## 1 0.006495279    0.008130394    0.008052912 0.01274496 0.005641799
##         TSS
## 1 0.6584952
Reg2.PA.out
##   model threshold       PCC sensitivity specificity     Kappa       AUC
## 1  Pred      0.95 0.7674476   0.9717349   0.6982502 0.5213086 0.8218957
##       PCC.sd sensitivity.sd specificity.sd   Kappa.sd      AUC.sd
## 1 0.00663503    0.005176513    0.008341633 0.01219265 0.006383383
##         TSS
## 1 0.6699851
Full.PA.out
##   model threshold       PCC sensitivity specificity     Kappa      AUC
## 1  Pred      0.94 0.7528977   0.9161793     0.69759 0.4842459 0.823613
##        PCC.sd sensitivity.sd specificity.sd   Kappa.sd      AUC.sd
## 1 0.006774302    0.008655733    0.008346805 0.01274623 0.006437902
##         TSS
## 1 0.6137693

Compare Models

#Environment Level
Compare.tab = as.data.frame(matrix(ncol = 7, nrow= 3))
names(Compare.tab) = c("Model", "DIC", "WAIC", "Sensitivity", "Specificity", "AUC", "TSS")

Compare.tab[1, 1] = "Combined"
Compare.tab[1, 2] = round(GetMets(JModel.comb)[1,3],2)
Compare.tab[1, 3] = round(GetMets(JModel.comb)[2,3],2)
Compare.tab[1, 4] = round(Comb.PA.out[1,4], 3)
Compare.tab[1, 5] = round(Comb.PA.out[1,5], 3)
Compare.tab[1, 6] = round(Comb.PA.out[1,7], 3)
Compare.tab[1, 7] = round(Comb.PA.out[1,13], 3)

Compare.tab[2, 1] = "Two Region"
Compare.tab[2, 2] = round(GetMets(JModel.2Reg)[1,3],2)
Compare.tab[2, 3] = round(GetMets(JModel.2Reg)[2,3],2)
Compare.tab[2, 4] = round(Reg2.PA.out[1,4], 3)
Compare.tab[2, 5] = round(Reg2.PA.out[1,5], 3)
Compare.tab[2, 6] = round(Reg2.PA.out[1,7], 3)
Compare.tab[2, 7] = round(Reg2.PA.out[1,13], 3)

Compare.tab[3, 1] = "Three Region"
Compare.tab[3, 2] = round(GetMets(JModel.full)[1,3],2)
Compare.tab[3, 3] = round(GetMets(JModel.full)[2,3],2)
Compare.tab[3, 4] = round(Full.PA.out[1,4], 3)
Compare.tab[3, 5] = round(Full.PA.out[1,5], 3)
Compare.tab[3, 6] = round(Full.PA.out[1,7], 3)
Compare.tab[3, 7] = round(Full.PA.out[1,13], 3)
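
The table above is filled cell-by-cell; an equivalent sketch fills it programmatically (assuming GetMets(), defined earlier, returns DIC and WAIC in rows 1-2, column 3):

mods = list(JModel.comb, JModel.2Reg, JModel.full)
outs = list(Comb.PA.out, Reg2.PA.out, Full.PA.out)
Compare.tab2 = data.frame(Model = c("Combined", "Two Region", "Three Region"),
                          DIC  = sapply(mods, function(m) round(GetMets(m)[1,3], 2)),
                          WAIC = sapply(mods, function(m) round(GetMets(m)[2,3], 2)),
                          Sensitivity = sapply(outs, function(o) round(o[1,4], 3)),
                          Specificity = sapply(outs, function(o) round(o[1,5], 3)),
                          AUC = sapply(outs, function(o) round(o[1,7], 3)),
                          TSS = sapply(outs, function(o) round(o[1,13], 3)))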



#Detection Level
Compare.tab1 = as.data.frame(matrix(ncol = 3, nrow= 3))
names(Compare.tab1) = c("Model", "DIC", "WAIC")

Compare.tab1[1, 1] = "Combined"
Compare.tab1[1, 2] = round(GetMets(JModel.comb)[1,2],2)
Compare.tab1[1, 3] = round(GetMets(JModel.comb)[2,2],2)

Compare.tab1[2, 1] = "Two Region"
Compare.tab1[2, 2] = round(GetMets(JModel.2Reg)[1,2],2)
Compare.tab1[2, 3] = round(GetMets(JModel.2Reg)[2,2],2)

Compare.tab1[3, 1] = "Three Region"
Compare.tab1[3, 2] = round(GetMets(JModel.full)[1,2],2)
Compare.tab1[3, 3] = round(GetMets(JModel.full)[2,2],2)
kable(Compare.tab, caption = "Environment Level") %>%
      kable_styling("striped", full_width = F) %>%
      row_spec(0, font_size = 20) %>%
      column_spec(1, bold = T)
Environment Level
Model          DIC      WAIC     Sensitivity  Specificity  AUC    TSS
Combined       3494.14  3735.14  0.927        0.732        0.858  0.658
Two Region     1127.21  1024.02  0.972        0.698        0.822  0.670
Three Region   1207.91  1094.29  0.916        0.698        0.824  0.614
kable(Compare.tab1, caption = "Detection Level") %>%
      kable_styling("striped", full_width = F) %>%
      row_spec(0, font_size = 20) %>%
      column_spec(1, bold = T)
Detection Level
Model          DIC     WAIC
Combined       859.43  1740.38
Two Region     853.41  1287.06
Three Region   841.51  1191.30

Compare Model Metrics by Region

#Two Regions
#"Wisconsin + UP"
Reg2.WI_UP = Obs.set.R2.keep@data %>% filter(Region == "Wisconsin + UP")

Reg2.WI_UP = as.data.frame(cbind(1:dim(Reg2.WI_UP)[1], Reg2.WI_UP$OBS, Reg2.WI_UP$Pred.Full))
names(Reg2.WI_UP) = c("ID", "OBS", "Pred")

Thresh = optimal.thresholds(Reg2.WI_UP, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
##         Method Pred
## 1 MaxSens+Spec 0.95
## 2 PredPrev=Obs 0.97
Reg2.WI_UP.out = presence.absence.accuracy(Reg2.WI_UP, threshold = Thresh[1,2])

#Calculate TSS
Reg2.WI_UP.out$TSS = Reg2.WI_UP.out$sensitivity + Reg2.WI_UP.out$specificity - 1

Reg2.WI_UP.out$Region = "Wisconsin + UP"


#"Lower"
Reg2.WI_Low = Obs.set.R2.keep@data %>% filter(Region == "Lower")

Reg2.WI_Low = as.data.frame(cbind(1:dim(Reg2.WI_Low)[1], Reg2.WI_Low$OBS, Reg2.WI_Low$Pred.Full))
names(Reg2.WI_Low) = c("ID", "OBS", "Pred")

Thresh = optimal.thresholds(Reg2.WI_Low, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
##         Method Pred
## 1 MaxSens+Spec 0.91
## 2 PredPrev=Obs 0.99
Reg2.WI_Low.out = presence.absence.accuracy(Reg2.WI_Low, threshold = Thresh[1,2])

#Calculate TSS
Reg2.WI_Low.out$TSS = Reg2.WI_Low.out$sensitivity + Reg2.WI_Low.out$specificity - 1

Reg2.WI_Low.out$Region = "Lower"



#Three Regions Model
##"Wisconsin"
Reg3.WI = Obs.set.R3.keep %>% filter(Region == "Wisconsin")

Reg3.WI = as.data.frame(cbind(1:dim(Reg3.WI)[1], Reg3.WI$OBS, Reg3.WI$Pred.Full))
names(Reg3.WI) = c("ID", "OBS", "Pred")

Thresh = optimal.thresholds(Reg3.WI, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
##         Method Pred
## 1 MaxSens+Spec 0.96
## 2 PredPrev=Obs 0.99
Reg3.WI.out = presence.absence.accuracy(Reg3.WI, threshold = Thresh[1,2])

#Calculate TSS
Reg3.WI.out$TSS = Reg3.WI.out$sensitivity + Reg3.WI.out$specificity - 1

Reg3.WI.out$Region = "Wisconsin"


##"Upper"
Reg3.UP = Obs.set.R3.keep %>% filter(Region == "Upper")

Reg3.UP = as.data.frame(cbind(1:dim(Reg3.UP)[1], Reg3.UP$OBS, Reg3.UP$Pred.Full))
names(Reg3.UP) = c("ID", "OBS", "Pred")

Thresh = optimal.thresholds(Reg3.UP, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
##         Method Pred
## 1 MaxSens+Spec 0.97
## 2 PredPrev=Obs 0.97
Reg3.UP.out = presence.absence.accuracy(Reg3.UP, threshold = Thresh[1,2])

#Calculate TSS
Reg3.UP.out$TSS = Reg3.UP.out$sensitivity + Reg3.UP.out$specificity - 1

Reg3.UP.out$Region = "Upper"


##"Lower"
Reg3.low = Obs.set.R3.keep %>% filter(Region == "Lower")

Reg3.low = as.data.frame(cbind(1:dim(Reg3.low)[1], Reg3.low$OBS, Reg3.low$Pred.Full))
names(Reg3.low) = c("ID", "OBS", "Pred")

Thresh = optimal.thresholds(Reg3.low, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
##         Method Pred
## 1 MaxSens+Spec 0.94
## 2 PredPrev=Obs 0.99
Reg3.low.out = presence.absence.accuracy(Reg3.low, threshold = Thresh[1,2])

#Calculate TSS
Reg3.low.out$TSS = Reg3.low.out$sensitivity + Reg3.low.out$specificity - 1

Reg3.low.out$Region = "Lower"

#Combined Model ##############################################
#Obs.set.comb.keep
##"Wisconsin"
CombWI = Obs.set.comb.keep %>% filter(StateUP == "Wisconsin")

CombWI = as.data.frame(cbind(1:dim(CombWI)[1], CombWI$OBS, CombWI$Pred.Comb))
names(CombWI) = c("ID", "OBS", "Pred")

Thresh = optimal.thresholds(CombWI, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
##         Method Pred
## 1 MaxSens+Spec 0.97
## 2 PredPrev=Obs 0.94
CombWI.out = presence.absence.accuracy(CombWI, threshold = Thresh[1,2])

#Calculate TSS
CombWI.out$TSS = CombWI.out$sensitivity + CombWI.out$specificity - 1

CombWI.out$Region = "Wisconsin"


##"Upper"
CombUP = Obs.set.comb.keep %>% filter(StateUP == "Mich.UP")

CombUP = as.data.frame(cbind(1:dim(CombUP)[1], CombUP$OBS, CombUP$Pred.Comb))
names(CombUP) = c("ID", "OBS", "Pred")

Thresh = optimal.thresholds(CombUP, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
##         Method Pred
## 1 MaxSens+Spec 0.97
## 2 PredPrev=Obs 0.97
CombUP.out = presence.absence.accuracy(CombUP, threshold = Thresh[1,2])

#Calculate TSS
CombUP.out$TSS = CombUP.out$sensitivity + CombUP.out$specificity - 1

CombUP.out$Region = "Upper"


##"Lower"
Comblow = Obs.set.comb.keep %>% filter(StateUP == "Michigan")

Comblow = as.data.frame(cbind(1:dim(Comblow)[1], Comblow$OBS, Comblow$Pred.Comb))
names(Comblow) = c("ID", "OBS", "Pred")

Thresh = optimal.thresholds(Comblow, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
##         Method Pred
## 1 MaxSens+Spec 0.94
## 2 PredPrev=Obs 0.96
Comblow.out = presence.absence.accuracy(Comblow, threshold = Thresh[1,2])

#Calculate TSS
Comblow.out$TSS = Comblow.out$sensitivity + Comblow.out$specificity - 1

Comblow.out$Region = "Lower"


#Create Table
Reg2.WI_UP.out$Model = "Two Region"
Reg2.WI_Low.out$Model = "Two Region"


Reg3.WI.out$Model = "Three Region"
Reg3.UP.out$Model = "Three Region"
Reg3.low.out$Model = "Three Region"

CombWI.out$Model = "Combined"
CombUP.out$Model = "Combined"
Comblow.out$Model = "Combined"

All.data = rbind(CombWI.out, CombUP.out, Comblow.out, Reg2.WI_Low.out, Reg2.WI_UP.out, 
                 Reg3.WI.out, Reg3.UP.out, Reg3.low.out)

All.data = All.data %>% select(Model, Region, sensitivity, specificity, AUC, TSS)
names(All.data) = c("Model", "Region", "Sensitivity", "Specificity", "AUC", "TSS")

kable(All.data, caption = "Metrics by Region") %>%
      kable_styling("striped", full_width = F) %>%
      row_spec(0, font_size = 20) %>%
      column_spec(1, bold = T)
Metrics by Region
Model         Region          Sensitivity  Specificity  AUC        TSS
Combined      Wisconsin       0.7045455    0.4285714    0.5324675  0.1331169
Combined      Upper           0.9371728    0.4841076    0.7641854  0.4212804
Combined      Lower           0.9629630    0.2500000    0.5740741  0.2129630
Two Region    Lower           1.0000000    0.8188277    0.9050391  0.8188277
Two Region    Wisconsin + UP  0.9729730    0.5937993    0.7718857  0.5667722
Three Region  Wisconsin       0.4545455    0.7891566    0.6164126  0.2437021
Three Region  Upper           0.7476440    0.4449878    0.6088327  0.1926318
Three Region  Lower           1.0000000    0.8188277    0.8923097  0.8188277