Snowshoe Hare in the Upper Midwest

Non-stationary models with physical barriers and non-linear covariates

Set Directory

setwd("~/Michigan_State/Sean")

Load and Prep Observation Data

Michigan

Loading raw Michigan data, dropping previously acquired environmental covariates and summing trial successes.

MI.df = read.csv("./Data_102418/mi_data10-24-18.csv",  
                   header = TRUE, stringsAsFactors = FALSE, sep=",") 

MI.df = MI.df %>%
        mutate(Long = Lat, #these appear reversed in original data
               Lat = Lon,
               State = "Michigan", #transect state
               OBS = Hare, #overall presence (at any trial)
               Counts = T.1 + T.2 + T.3 + T.4 + T.5 + T.6 + T.7 + T.8 + T.9, #Sum across trials
               Trials = 9, #No NAs in this dataset; all transects have 9 trials
               Site2 = paste("M", 1:nrow(MI.df), sep=".")) %>%
        select(Site2, Long, Lat, State, OBS, Counts, Trials)

dim(MI.df) #Check data
## [1] 125   7
head(MI.df)
##   Site2      Long      Lat    State OBS Counts Trials
## 1   M.1 -88.24388 47.41681 Michigan   1      6      9
## 2   M.2 -84.58309 45.53829 Michigan   1      6      9
## 3   M.3 -84.14892 45.46854 Michigan   1      6      9
## 4   M.4 -90.07851 46.35078 Michigan   1      1      9
## 5   M.5 -86.56820 46.37911 Michigan   1      1      9
## 6   M.6 -86.75846 46.25573 Michigan   1      9      9
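Because the raw coordinates arrive swapped, a quick defensive check (an addition to the original script) confirms the fix; in this domain, longitudes are negative and latitudes fall roughly between 41 and 48 degrees.

#Check (addition): coordinates landed in the right columns
stopifnot(all(MI.df$Long < 0), all(MI.df$Lat > 40 & MI.df$Lat < 49))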

Wisconsin

Loading data as above, but the number of trials varies by transect, so successes and trials are summed individually per transect.

WI.df = read.csv("./Data_102418/wi_data10-24-18.csv",  
                   header = TRUE, stringsAsFactors = FALSE, sep=",")

WI.Trial.df = WI.df[2:21] #pull out the trial columns

WI.Trial.df[WI.Trial.df >= 1] = 1 #convert track counts to binary detections

WI.Trial.df$Counts = rowSums(WI.Trial.df, na.rm=T) #sum total successes by transect

WI.Trial.df$Trials = rowSums(is.na(WI.df[2:21])==FALSE) #count number of trials

WI.df = WI.df %>% #Basically the same as MI above
        mutate(Long = Lat, 
               Lat = Lon,
               State = "Wisconsin",
               OBS = Hare,
               Counts = WI.Trial.df$Counts, #Sum across trials
               Trials = WI.Trial.df$Trials, #trials vary by transect
               Site2 = paste("W", 1:nrow(WI.df), sep=".")) %>% #Site identifier
        select(Site2, Long, Lat, State, OBS, Counts, Trials)

dim(WI.df)
## [1] 195   7
head(WI.df)
##   Site2      Long      Lat     State OBS Counts Trials
## 1   W.1 -90.72816 44.36486 Wisconsin   1      0      8
## 2   W.2 -90.84052 44.27223 Wisconsin   0      0      6
## 3   W.3 -90.82058 44.45853 Wisconsin   1      5      8
## 4   W.4 -92.11831 45.44736 Wisconsin   0      0      6
## 5   W.5 -91.22783 45.02406 Wisconsin   0      0      6
## 6   W.6 -90.01462 44.21396 Wisconsin   0      0      8
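Since trials vary by transect here, a small sanity check (an addition to the original script) confirms that summed successes never exceed the number of trials run.

#Check (addition): successes bounded by trials
stopifnot(all(WI.df$Counts <= WI.df$Trials))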

Combine Data from MI and WI

Join the data in a common data frame.

hare.df = rbind(MI.df, WI.df)

dim(hare.df)
## [1] 320   7

Convert to Points

Hare.pnt = SpatialPointsDataFrame(hare.df[, c("Long","Lat")], hare.df) 
proj4string(Hare.pnt) = "+proj=longlat +datum=NAD83 +no_defs +ellps=GRS80 +towgs84=0,0,0"

Get Domain Boundaries

Defining the domain extent, downloading state boundaries, and creating raster versions for later plotting. Note that the “Lakes” file is used to better identify lake boundaries. A copy of this shapefile is in the folder with this script; the path to the shapefile will need to be updated in this chunk.

World = map("world", 
            fill = TRUE, 
            plot = FALSE)

IDs = sapply(strsplit(World$names, ":"), function(x) x[1])

LL84 = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"

WorldP = map2SpatialPolygons(World, IDs = IDs,
                             proj4string = CRS(projection(LL84)))

#Add a dataframe  
pid = sapply(slot(WorldP, "polygons"), 
             function(x) slot(x, "ID"))

p.df = data.frame( ID=1:length(WorldP), 
                   row.names = pid)

World = SpatialPolygonsDataFrame(WorldP, p.df)
World = spTransform(World, proj4string(Hare.pnt ))
World = gBuffer(World, width = 0, byid = F)

#States
States = map("state", 
             fill = TRUE, 
             plot = FALSE)


IDs = sapply(strsplit(States$names, ":"), function(x) x[1])

LL84 = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"

StatesP = map2SpatialPolygons(States, IDs = IDs,
                              proj4string = CRS(projection(LL84)))

#Add a dataframe  
pid = sapply(slot(StatesP, "polygons"), 
             function(x) slot(x, "ID"))

p.df = data.frame( ID=1:length(StatesP), 
                   row.names = pid)

States = SpatialPolygonsDataFrame(StatesP, p.df)
States = spTransform(States, proj4string(Hare.pnt ))



Lakes = readOGR(dsn = "C:/Users/humph173/Documents/Michigan_State/Marten/ArcWork/Lakes", 
                layer = "Lake_2ks", 
                stringsAsFactors = FALSE)
## OGR data source with driver: ESRI Shapefile 
## Source: "C:\Users\humph173\Documents\Michigan_State\Marten\ArcWork\Lakes", layer: "Lake_2ks"
## with 1 features
## It has 15 fields
## Integer64 fields read as strings:  OBJECTID Id area InPoly_FID SimPgnFlag
LakesLL = spTransform(Lakes, proj4string(States))


Ext = c(-93.173415, -81.932841, 41.217132, 47.740649)
Domain = crop(States, Ext)
Domain$Name = str_cap_words(rownames(Domain@data))

Water0 = as(extent(Domain), "SpatialPolygons") 
p.df = data.frame(ID=1:length(Water0))

Water0 = SpatialPolygonsDataFrame(Water0, p.df, match.ID = F)
proj4string(Water0) = proj4string(Domain)
Water1 = gDifference(Water0, spTransform(Lakes, proj4string(Water0)))

DomLLU = gUnaryUnion(Domain)

#Rasterized version  
Ras = raster(res = 0.02, ext = extent(DomLLU),
             crs = proj4string(DomLLU))

Domain.r = rasterize(DomLLU, Ras, 
                     field = 0, 
                     background = NA)

#Point grid version
Grd.pnt = rasterToPoints(Domain.r, spatial = TRUE)

Grd.pnt@data = Grd.pnt@data %>%
  mutate(Long = Grd.pnt@coords[,1],
         Lat = Grd.pnt@coords[,2]) %>%
  select(-layer)

Quick Plot to Check Data

rng = seq(0, 255, 1)

mCols  = brewer.pal(11, "RdYlBu")[-6] 
cr0 = rev(colorRampPalette((mCols))(n = 256))
cr = colorRampPalette(c("tan", cr0),  
                      bias = 1, space = "rgb")

MyMatrix = matrix(nrow=7, ncol=2)
rownames(MyMatrix) = rownames(coordinates(Domain))
MyMatrix[,1] = c(-89.07686, -86.11260, -91.91021, -83.82014, -91.31258, -83.46125, -89.51171)
MyMatrix[,2] = c(41.80000, 41.36514, 42.28710, 43.0, 47.5041, 41.61285, 43.63285)

levelplot(Domain.r, 
          margin = FALSE,
          xlab = NULL, 
          ylab = NULL, 
          maxpixels = 1e5,
          col.regions = cr, at = rng,
          colorkey = FALSE, par.strip.text = list(fontface='bold', cex=1.5),
          par.settings = list(axis.line = list(col = "black"),
                              strip.background = list(col = 'transparent'), 
                              strip.border = list(col = 'transparent')),
          scales = list(cex = 1.25)) + 
  latticeExtra::layer(sp.polygons(Domain, col = "black", lwd = 0.5)) +
  latticeExtra::layer(sp.polygons(LakesLL, fill = "lightblue", col = "transparent", lwd = 0.5)) +
  latticeExtra::layer(sp.points(Hare.pnt, col = "red", pch = as.integer(factor(Hare.pnt$State)), cex = 1)) +
  latticeExtra::layer(sp.text(MyMatrix, txt = Domain$Name, 
                              pos =c(1,3,3,2,2,1,1), 
                              col="black",font=list(face="bold"), cex=1)) +
  latticeExtra::layer({SpatialPolygonsRescale(layout.north.arrow(),
                                              offset = c(-83, 45.5),
                                              scale = 2)})

Project Everything to KM

nProj = "+proj=utm +zone=16 +datum=NAD83 +units=km +no_defs +ellps=GRS80 +towgs84=0,0,0"

LakesKM = spTransform(Lakes, nProj)
FocalKM = spTransform(Water1, nProj)
DomP = spTransform(Domain, nProj)
DomPU = gUnaryUnion(DomP)

Hare.pntP = spTransform(Hare.pnt, nProj)

Grd.pntP = spTransform(Grd.pnt, nProj)

Construct Mesh and Define Physical Barriers

These values are in the units of the planar (projected) coordinate system, kilometers.

max.edge = 8 #Make the outer edge length 8km
bound.outer = 75 #Outer extension can be 75km

bdry = inla.sp2segment(DomPU) #Formatting boundary for r-INLA

mesh = inla.mesh.2d(boundary = bdry, #Boundary
                    loc = Hare.pntP, #Fit to point locations
                    max.edge = c(1, 5)*max.edge, #8 km edges inside the boundary, up to 40 km in the extension
                    cutoff = 8, #merge points closer than 8 km
                    min.angle = 25, #minimum interior angle of triangles
                    offset = c(max.edge, bound.outer)) #inner and outer extension widths
mesh$n #number of nodes
## [1] 5551
plot(mesh, lwd=0.5)
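As a rough resolution check (an addition to the original script), edge lengths can be recovered from the triangle graph; interior edges should sit near the 8 km target.

#Check (addition): distribution of mesh edge lengths (km)
tv = mesh$graph$tv #triangle vertex indices
ed = rbind(tv[, 1:2], tv[, 2:3], tv[, c(3, 1)]) #all triangle edges
len = sqrt(rowSums((mesh$loc[ed[, 1], 1:2] - mesh$loc[ed[, 2], 1:2])^2))
summary(len)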

Define Spatial Barriers

Article (Bakka et al. 2019): https://www.sciencedirect.com/science/article/pii/S221167531830099X
Tutorial: https://haakonbakka.bitbucket.io/btopic107.html

tl = length(mesh$graph$tv[,1]) #number of mesh triangles

posTri = matrix(0, tl, 2) #holds triangle centroids

for (t in 1:tl){ #centroid = mean of the three vertex coordinates
  temp = mesh$loc[mesh$graph$tv[t, ], ]
  posTri[t,] = colMeans(temp)[c(1,2)] 
}

posTri = SpatialPoints(posTri) #centroids as points
proj4string(posTri) = proj4string(FocalKM)

normal = over(DomPU, posTri, returnList=T) #triangles whose centroid falls on land
normal = unlist(normal)
barrier.triangles = setdiff(1:tl, normal) #remaining triangles form the barrier (water)

poly.barrier = inla.barrier.polygon(mesh, barrier.triangles)
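The share of triangles flagged as barrier gives a quick plausibility check (an addition to the original script); it should roughly match the water fraction of the meshed area.

length(barrier.triangles) / tl #fraction of mesh triangles over water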

Plot Mesh with Barriers

Red areas are barriers.

plot(mesh, main="Mesh")
plot(poly.barrier, add=T, col='red')
plot(mesh, add=T)

Combine Observations, Mesh Nodes, and Grid Points

#Node coordinates
dd = as.data.frame(cbind(mesh$loc[,1], 
                         mesh$loc[,2]))

names(dd) = c("Long", "Lat") #name coordinates
dd$OBS = 0 #no hare at these locations
dd$Site2 = paste("N", 1:nrow(dd), sep = ".") #to match with observation data
dd$State = "All" 
dd$Spp = "Mesh"
dd$Counts = 0
dd$Trials = 0

#Hare Obs
hare.set = Hare.pntP@data %>%
           mutate(Long = Hare.pntP@coords[,1],
                  Lat = Hare.pntP@coords[,2],
                  Spp = "Hare")

#Grid
grid.set = Grd.pntP@data %>%
           mutate(Long = Grd.pntP@coords[,1],
                  Lat = Grd.pntP@coords[,2],
                  Spp = "Grid",
                  OBS = 0,
                  State = "All",
                  Counts = 0,
                  Trials = 0,
                  Site2 = paste("G", 1:nrow(Grd.pntP@data), sep = "."))


All.pnts = rbind(hare.set, dd, grid.set)
All.pnts = SpatialPointsDataFrame(All.pnts[, c("Long","Lat")], All.pnts)
proj4string(All.pnts) = nProj

Extract Covariates

Forest1km = raster("C:/Users/humph173/Documents/Michigan_State/Sean/Loop_020819/Forest1km.grd")
mSnow5Yr = raster("./Hare1/Mean5yrSnow.tif")
mxTemp = raster("./Hare1/meanTMAX.tif")

All.pnts$mSnow5yrE = extract(mSnow5Yr, 
                                spTransform(All.pnts,
                                CRS(proj4string(mSnow5Yr))), 
                                method="simple") 

All.pnts$mSnow5yrE[is.na(All.pnts$mSnow5yrE)] = mean(All.pnts$mSnow5yrE, na.rm=T)

All.pnts$mxTempE = extract(mxTemp, 
                                spTransform(All.pnts,
                                CRS(proj4string(mxTemp))), 
                                method="simple") 

All.pnts$mxTempE[is.na(All.pnts$mxTempE)] = mean(All.pnts$mxTempE, na.rm=T)

All.pnts$Forest1kmE = extract(Forest1km, 
                                spTransform(All.pnts,
                                CRS(proj4string(Forest1km))), 
                                method="simple") 

All.pnts$Forest1kmE[is.na(All.pnts$Forest1kmE)] = mean(All.pnts$Forest1kmE, na.rm=T)

All.pnts$Forest1kmE = round(All.pnts$Forest1kmE/100, 1) #rescale forest values and round to one decimal (fewer unique knot values)
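A quick summary of the extracted covariates (an addition to the original script) confirms units and ranges before modeling.

summary(All.pnts@data[, c("mSnow5yrE", "mxTempE", "Forest1kmE")])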

Identify the UP

Identifying locations in Michigan’s Upper Peninsula (U.P.) based on well-defined county boundaries.

UP = readOGR(dsn = "C:/Users/humph173/Documents/Michigan_State/SLP_Beam_Diam/Counties_v17a", 
                layer = "MI_UP", 
                stringsAsFactors = FALSE)
## OGR data source with driver: ESRI Shapefile 
## Source: "C:\Users\humph173\Documents\Michigan_State\SLP_Beam_Diam\Counties_v17a", layer: "MI_UP"
## with 15 features
## It has 15 fields
## Integer64 fields read as strings:  OBJECTID FIPSNUM
UPp = gUnaryUnion(spTransform(UP, proj4string(All.pnts)))

All.pnts$UP = is.na(over(All.pnts, UPp)) #TRUE = outside the U.P., FALSE = inside

Update State Designation to Include UP

Cleaning up labels

All.pnts$StateUP = ifelse(All.pnts$UP == FALSE, "Mich.UP", All.pnts$State)
All.pnts$StateUPW = ifelse(All.pnts$StateUP == "Mich.UP", "Wisconsin", All.pnts$StateUP)
All.pnts$Domain = over(All.pnts, DomPU)

levels(factor(All.pnts$StateUP))
## [1] "All"       "Mich.UP"   "Michigan"  "Wisconsin"
levels(factor(All.pnts$StateUPW))
## [1] "All"       "Michigan"  "Wisconsin"

Separate Datasets

Hare.mod = subset(All.pnts, Spp == "Hare") #Observations only

Mesh.mod = subset(All.pnts, Spp == "Mesh" & is.na(Domain) == FALSE) #Mesh locations excluding buffer extension

HareMesh.mod = subset(All.pnts, Spp != "Grid") #Observations excluding Grid points for prediction/plotting
HareMesh.mod$ID = 1:nrow(HareMesh.mod@data)

Grd.pnts = subset(All.pnts, Spp == "Grid") #Grid locations for prediction/plotting

Collinearity Check

The condition index needs to be below 30; the values here suggest collinearity will not be an issue.

library(perturb)
## 
## Attaching package: 'perturb'
## The following object is masked from 'package:raster':
## 
##     reclassify
Colin.df = HareMesh.mod@data %>% #select covariates
            select(mSnow5yrE, mxTempE, Forest1kmE)

CorCov = cor(Colin.df) #calculate correlation

corrplot(CorCov) #view correlation table

CI = colldiag(CorCov) #Apply metric
CI
## Condition
## Index    Variance Decomposition Proportions
##          intercept mSnow5yrE mxTempE Forest1kmE
## 1  1.000 0.028     0.014     0.180   0.025     
## 2  1.711 0.661     0.001     0.540   0.001     
## 3  6.042 0.311     0.985     0.280   0.974

Define Regions

Converting each region to an integer value so that covariate effects can be internally “replicated” by region.

HR.df = HareMesh.mod@data

levels(factor(HR.df$StateUP)) #Level names
## [1] "All"       "Mich.UP"   "Michigan"  "Wisconsin"
HR.df$StateUPWI = as.integer(as.factor(HR.df$StateUP)) #convert to integer
levels(factor(HR.df$StateUPWI)) #levels as integer
## [1] "1" "2" "3" "4"
#Keep U.P. Hare observations as "1", set other regions to "0"
MupSet = HR.df
MupSet$OBS2 = ifelse(MupSet$StateUPWI == "2", MupSet$OBS, 0)

#Keep Lower MI Hare observations as "1", set other regions to "0"
MSet = HR.df
MSet$OBS2 = ifelse(MSet$StateUPWI == "3", MSet$OBS, 0)


#Keep Wisconsin Hare observations as "1", set other regions to "0"
WSet = HR.df
WSet$OBS2 = ifelse(WSet$StateUPWI == "4", WSet$OBS, 0)

MupSet$Rep = 1 #Renumbering from 1-3 instead of 2-4
MSet$Rep = 2
WSet$Rep = 3

HR.df = rbind(MupSet, MSet, WSet) #Stack the three region-specific copies (each site appears once per region)

HR.df %>% #Count of hare observations by region
  group_by(Rep) %>%
  summarise(Cnt = sum(OBS2))
## # A tibble: 3 x 2
##     Rep   Cnt
##   <dbl> <dbl>
## 1     1    38
## 2     2    38
## 3     3    53
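Because three region-specific copies are stacked, the modeling frame is exactly three times the original observation set (a check added here, not in the original script).

nrow(HR.df) == 3 * nrow(HareMesh.mod@data) #TRUE: one copy per region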

Model Set-up

Spatial Priors and Indices

Relating observations to mesh locations as a matrix and defining flat spatial priors.

#Relate mesh for detection level
locs = cbind(Hare.mod@coords[,1], Hare.mod@coords[,2]) #point locations

A.det = inla.spde.make.A(mesh, #the mesh
                         alpha = 2, #default setting
                         loc=locs) #our locations
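#Each row of the projector matrix holds the barycentric weights linking
#one observation to its three enclosing mesh vertices, so rows sum to 1.
#Quick check (an addition to the original script):
summary(Matrix::rowSums(A.det))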



#Relate mesh for covariate level
locs = cbind(HR.df[,"Long"], HR.df[,"Lat"])

A.env = inla.spde.make.A(mesh, 
                          alpha = 2,
                          loc=locs)


#Prior for barrier model
barrier.model = inla.barrier.pcmatern(mesh, #mesh
                                      barrier.triangles = barrier.triangles, #Lake boundaries
                                      prior.range = c(1000, 0.5), #P(spatial range < 1000 km) = 0.5
                                      prior.sigma = c(1, 0.01)) #P(marginal sd > 1) = 0.01

#Same as above, but without barriers
spde = inla.spde2.pcmatern(mesh, 
                           prior.range = c(1000, 0.5), 
                           prior.sigma = c(1, 0.01))


#Create index to track locations of mesh nodes
field.det = inla.spde.make.index("field.det", spde$n.spde) #index for detection level

field.det.c = inla.spde.make.index("field.det.c", spde$n.spde) #copy of above to pass to covariate level

field.env = inla.spde.make.index("field.env", spde$n.spde) #index for covariate level

Separate Set for Base Model

locs = cbind(HareMesh.mod@coords[,1], HareMesh.mod@coords[,2]) 

A.base = inla.spde.make.A(mesh, 
                          alpha = 2,
                          loc=locs)

field.base = inla.spde.make.index("field.base", spde$n.spde)


HR.df2 = HareMesh.mod@data

base.lst = list(c(field.base, 
                  list(intercept3 = 1)), 
                  list(XX = HR.df2[,"Long"])) #Placeholder only

base.stk = inla.stack(data = list(Y = HR.df2$OBS), #Standard model for comparison
                         A = list(A.base, 1), 
                   effects = base.lst,   
                       tag = "base.0")

Organize Data for Fitting

INLA stacks require list objects rather than data frames.

#Detection level
DT.df = Hare.mod@data

DT.lst = list(c(field.det,                            #Spatial index
                list(intercept1 = 1)),                #Intercept
                list(XX = DT.df[,"Lat"],              #List of variables/covariates (placeholder for detection)
                     Site = DT.df[,"Site2"]))         #Site identifier (to allow sites to independently vary)

detect.stk = inla.stack(data = list(Y = DT.df$Counts, #Number of successes
                Field.trials = DT.df$Trials),         #trials       
                           A = list(A.det, 1),        #Projection matrix
                     effects = DT.lst,                #Spatial index and covariates
                         tag = "det.0")               #just a name/tag

jdetect.stk = inla.stack(data = list(Y = cbind(DT.df$Counts, NA), #"NA" = space for next model level
                 Field.trials = DT.df$Trials), #as above
                            A = list(A.det, 1), 
                      effects = DT.lst,   
                          tag = "jdet.0")


###Environment/covariate level
HR.lst = list(c(field.env, #index for covariate level
                field.det.c, #copy of field from detection level
                list(intercept2 = 1)), #intercept
                list(mSnw5yr = round(HR.df[,"mSnow5yrE"], 3), #Snow weeks
                     mMxTemp = round(HR.df[,"mxTempE"], 3), #Temperature
                     Forest = HR.df[,"Forest1kmE"], #Forest
                     Region = HR.df[,"Rep"])) #Region identifier

env.stk = inla.stack(data = list(Y = HR.df$OBS, #Standard model for comparison
              Field.trials = rep(1, dim(HR.df)[1])), #1 trial
                         A = list(A.env, 1), 
                   effects = HR.lst,   
                       tag = "env.0")

jenv.stk = inla.stack(data = list(Y = cbind(NA, HR.df$OBS2), #NA = space for detection level
              Field.trials = rep(1, dim(HR.df)[1])),
                         A = list(A.env, 1), 
                   effects = HR.lst,   
                       tag = "jenv.0")


#Combine detection and covariate levels
Joint.stk = inla.stack(jdetect.stk, jenv.stk)

#Save data to run in HPCC
#save(list=c("Joint.stk", "barrier.model", "spde"), file="./HPC/Feb10/Comb_0210.RData") #File for HPCC
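When results come back, the rows belonging to each model level can be recovered from the joint stack by tag; this is how fitted values are separated after fitting. A short example using the tags defined above (an addition to the original script):

det.idx = inla.stack.index(Joint.stk, tag = "jdet.0")$data #detection-level rows
env.idx = inla.stack.index(Joint.stk, tag = "jenv.0")$data #covariate-level rows
#e.g., after fitting: JModel.full$summary.fitted.values$mean[env.idx]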

Full Model (Barrier)

The joint models are computationally expensive, so they were run on the HPCC.

Site.prior = list(theta=list(prior = "normal", param=c(0, 3))) #Prior for site effect
Reg.prior = list(theta=list(prior = "normal", param=c(0, 3))) #prior for region

JFrm0 = Y ~ -1 + intercept1 + #intercept (detection level)
                 intercept2 + #Intercept (covariate level)
                f(field.det,  #spatial index (detection)
                  model=barrier.model) + #change "barrier.model" to "spde" for no-barrier version.
                f(field.det.c,       #Shared spatial field to account for correlation between model levels
                  copy = "field.det", #copied from detection level of model
                  fixed = FALSE) +
                f(field.env,  #spatial index for covariate level
                  model=barrier.model) +
                f(Site, #Site identifier
                  model="iid", 
                  hyper=Site.prior) + #site prior
               f(mMxTemp,         #temperature
                  model="rw1",   #random walk of order 1
                  replicate = Region, #replicate for each region
                  hyper = Reg.prior) + #prior
               f(mSnw5yr, #weekly snowfall
                  model="rw1",
                  replicate = Region,
                  hyper = Reg.prior) +
               f(Forest, #Forest
                  model="iid",
                  replicate = Region,
                  hyper = Reg.prior) 
  
  
#thetaJ = JModel.full$internal.summary.hyperpar$mean #from initial run (to speed up model)
thetaJ = c(1.1935975, 3.7979780, -2.1432853, 6.6863289, -0.6710575, -0.1090578, -0.9891725, 0.6587151, 1)

JModel.full = inla(JFrm0, #formula
               data = inla.stack.data(Joint.stk), #data list object
               family = c("binomial","binomial"), #families for detection and covariate levels
               verbose = FALSE, #print progress while fitting (FALSE = quiet)
               Ntrials = inla.stack.data(Joint.stk)$Field.trials, #number trials (varies for detection level)
               control.fixed = list(prec = 0.001, #priors for intercept
                          prec.intercept = 0.0001), 
               control.predictor = list(
                               A = inla.stack.A(Joint.stk), #data again
                         compute = TRUE, #estimate fitted values
                            link = 1), #transform fitted values from logit
               control.mode = list(restart = TRUE, theta = thetaJ), #to speed up
               control.inla = list(strategy="gaussian", #to speed up
                                   int.strategy = "eb"),
               control.results = list(return.marginals.random = TRUE, #results to report
                                      return.marginals.predictor = TRUE),
               control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE)) #calculate indices for comparison

Load Results from HPCC

load("~/Michigan_State/Sean/HPC/Feb10/Full_0210_RES.RData")

Model Summary

summary(JModel.full)
## 
## Call:
## c("inla(formula = JFrm0, family = c(\"binomial\", \"binomial\"), data = inla.stack.data(Joint.stk), ",  "    Ntrials = inla.stack.data(Joint.stk)$Field.trials, verbose = TRUE, ",  "    control.compute = list(dic = TRUE, cpo = TRUE, waic = TRUE), ",  "    control.predictor = list(A = inla.stack.A(Joint.stk), compute = TRUE, ",  "        link = 1), control.inla = list(strategy = \"gaussian\", ",  "        int.strategy = \"eb\"), control.results = list(return.marginals.random = TRUE, ",  "        return.marginals.predictor = TRUE), control.fixed = list(prec = 0.001, ",  "        prec.intercept = 1e-04), control.mode = list(restart = TRUE, ",  "        theta = thetaJ), num.threads = 8)")
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          4.4073      16567.9072          6.2731      16578.5876 
## 
## Fixed effects:
##               mean     sd 0.025quant 0.5quant 0.975quant    mode kld
## intercept1 -4.2962 0.5153    -5.3080  -4.2963    -3.2854 -4.2962   0
## intercept2 -7.8112 0.5426    -8.8766  -7.8112    -6.7467 -7.8112   0
## 
## Random effects:
## Name   Model
##  field.det   RGeneric2 
## field.env   RGeneric2 
## Site   IID model 
## mMxTemp   RW1 model 
## mSnw5yr   RW1 model 
## Forest   RW1 model 
## field.det.c   Copy 
## 
## Model hyperparameters:
##                          mean     sd 0.025quant 0.5quant 0.975quant
## Theta1 for field.det   1.1318 0.1286     0.8887   1.1277     1.3942
## Theta2 for field.det   3.9663 0.2198     3.5218   3.9713     4.3876
## Theta1 for field.env  -1.9256 0.9071    -4.0266  -1.7800    -0.5826
## Theta2 for field.env   6.4645 0.6470     5.3985   6.3888     7.9019
## Precision for Site     0.4169 0.1159     0.2327   0.4027     0.6848
## Precision for mMxTemp  0.6397 0.2478     0.2805   0.5985     1.2386
## Precision for mSnw5yr  1.0801 0.4197     0.4977   1.0004     2.1206
## Precision for Forest   1.7988 0.7076     0.7745   1.6816     3.5102
## Beta for field.det.c   0.4968 0.0692     0.3626   0.4961     0.6342
##                          mode
## Theta1 for field.det   1.1128
## Theta2 for field.det   3.9890
## Theta1 for field.env  -1.1864
## Theta2 for field.env   6.1023
## Precision for Site     0.3757
## Precision for mMxTemp  0.5223
## Precision for mSnw5yr  0.8630
## Precision for Forest   1.4636
## Beta for field.det.c   0.4934
## 
## Expected number of effective parameters(std dev): 317.82(0.00)
## Number of equivalent replicates : 56.42 
## 
## Deviance Information Criterion (DIC) ...............: 2049.43
## Deviance Information Criterion (DIC, saturated) ....: NULL
## Effective number of parameters .....................: 459.23
## 
## Watanabe-Akaike information criterion (WAIC) ...: 2285.58
## Effective number of parameters .................: 454.01
## 
## Marginal log-Likelihood:  -35119.18 
## CPO and PIT are computed
## 
## Posterior marginals for linear predictor and fitted values computed

Combined Model

A version of the covariate model was also run without replication by region. Loading those results:

load("~/Michigan_State/Sean/HPC/Feb10/Combined_02102018.RData")

Base Model (No Covariates, No Trials)

For Comparison.

JFrmB = Y ~ -1 + intercept3 + #Intercept (covariate level)
                f(field.base,  #spatial index for covariate level
                  model=barrier.model) 
  
  
#thetaJ = JModel.base$internal.summary.hyperpar$mean #from initial run (to speed up model)
thetaJ = c(1.444903, 4.073292)

JModel.base = inla(JFrmB, #formula
               data = inla.stack.data(base.stk), #data list object
               family = "binomial",#family 
               verbose = FALSE, #print progress while fitting (FALSE = quiet)
               #Ntrials = inla.stack.data(Joint.stk)$Field.trials, #default is 1
               control.fixed = list(prec = 0.001, #priors for intercept
                          prec.intercept = 0.0001), 
               control.predictor = list(
                               A = inla.stack.A(base.stk), #data again
                         compute = TRUE, #estimate fitted values
                            link = 1), #transform fitted values from logit
               control.mode = list(restart = TRUE, theta = thetaJ), #to speed up
               control.inla = list(strategy="gaussian", #to speed up
                                   int.strategy = "eb"),
               control.results = list(return.marginals.random = TRUE, #results to report
                                      return.marginals.predictor = TRUE),
               control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE)) #calculate indices for comparison

Results

Maximum Temperature Compare (Full Model)

Comb.plt.df = as.data.frame(JModel.full$summary.random$mMxTemp)[,1:6] #Get effect estimates (Full model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     #Clean labels
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 3501) #Add Region ID

Comb.plt.df2 = as.data.frame(JModel.comb$summary.random$mMxTemp)[,1:6] #Get effect estimates (No replicates model)
names(Comb.plt.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df2$Region = "Combined" #ID

Comb.plt.df = rbind(Comb.plt.df, Comb.plt.df2) #Combine for plotting


Plt.df = Comb.plt.df
Plt.df$Region = as.factor(Plt.df$Region)

RAnge = range(Plt.df$ID)

Plt.all = ggplot(Plt.df, aes(ID, Mean, group = Region)) +
            geom_smooth(aes(col = Region, 
                      linetype= Region),
                      method = "loess",
                      span = 0.3,
                      se = FALSE,
                      lwd = 1) +
            scale_linetype_manual(values=c("solid", "longdash", "dotted", "dashed")) +
            scale_colour_manual(values=c("red", "brown", "blue", "black")) +
            geom_hline(yintercept = 0, 
                       linetype = "solid",
                       col = "lightgray",
                       size = 0.5) +  
            geom_vline(xintercept = 0, 
                       linetype = "dotted",
                       col = "red",
                       size = 0.5) +
            xlim(RAnge[1], RAnge[2]) +
            xlab("Mean Maximum Temperature (°C)") +
            ylab("Occurrence Probability (logit)") +  
            theme_classic() +
            theme(axis.text=element_text(size=16),
                   legend.title = element_text(size=16, face="bold"),
                  legend.key = element_rect(fill = "gray80", linetype=0),
                  legend.background = element_rect(fill = "gray80", linetype=0),
                  strip.text = element_text(face="bold", size = 20),
                  axis.title.y = element_text(face="bold", size = 20),
                  axis.title.x = element_text(face="bold", size = 20, vjust=-2))

Plt.all
## Warning: Removed 1 rows containing missing values (geom_vline).

Snow Weeks Compare (Full Model)

Comb.plt.df = as.data.frame(JModel.full$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 90)

Comb.plt.df2 = as.data.frame(JModel.comb$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")

Comb.plt.df2$Region = "Combined"

Comb.plt.df = rbind(Comb.plt.df, Comb.plt.df2)


Plt.df = Comb.plt.df

Plt.df$Region = as.factor(Plt.df$Region)

RAnge = range(Plt.df$ID)

Plt.all = ggplot(Plt.df, aes(ID, Mean, group = Region)) +
            geom_smooth(aes(col = Region, 
                      linetype= Region),
                      method = "loess",
                      span = 0.3,
                      se = FALSE,
                      lwd = 1) +
            scale_linetype_manual(values=c("solid", "longdash", "dotted", "dashed")) +
            scale_colour_manual(values=c("red", "brown", "blue", "black")) +
            geom_hline(yintercept = 0, 
                       linetype = "solid",
                       col = "lightgray",
                       size = 0.5) +  
            geom_vline(xintercept = 0, 
                       linetype = "dotted",
                       col = "red",
                       size = 0.5) +
            xlim(RAnge[1], RAnge[2]) +
            xlab("Snow Weeks") +
            ylab("Occurrence Probability (logit)") +  
            theme_classic() +
            theme(axis.text=element_text(size=16),
                   legend.title = element_text(size=16, face="bold"),
                  legend.key = element_rect(fill = "gray80", linetype=0),
                  legend.background = element_rect(fill = "gray80", linetype=0),
                  strip.text = element_text(face="bold", size = 20),
                  axis.title.y = element_text(face="bold", size = 20),
                  axis.title.x = element_text(face="bold", size = 20, vjust=-2))

Plt.all

Compare Forest (Full Model)

Comb.plt.df = as.data.frame(JModel.full$summary.random$Forest)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 110)

Comb.plt.df2 = as.data.frame(JModel.comb$summary.random$Forest)[,1:6]
names(Comb.plt.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")

Comb.plt.df2$Region = "Combined"

Plt.df = Comb.plt.df

Plt.df$Region = as.factor(Plt.df$Region)

RAnge = range(Plt.df$ID)

Plt.all = ggplot(Plt.df, aes(ID, Mean, group = Region)) +
            geom_smooth(aes(col = Region, 
                      linetype= Region),
                      method = "loess",
                      span = 0.3,
                      se = FALSE,
                      lwd = 1) +
            geom_smooth(data=Comb.plt.df2,
                          aes(ID, Mean, col = Region, 
                      linetype= Region),
                      method = "loess",
                      span = 0.6,
                      se = FALSE,
                      lwd = 1) +
            scale_linetype_manual(values=c("solid", "longdash", "dotted", "dashed")) +
            scale_colour_manual(values=c("red", "brown", "blue", "black")) +
            geom_hline(yintercept = 0, 
                       linetype = "solid",
                       col = "lightgray",
                       size = 0.5) +  
            geom_vline(xintercept = 0, 
                       linetype = "dotted",
                       col = "red",
                       size = 0.5) +
            xlim(RAnge[1], RAnge[2]) +
            xlab("Forest (1km Grid)") +
            ylab("Occurrence Probability (logit)") +  
            theme_classic() +
            theme(axis.text=element_text(size=16),
                   legend.title = element_text(size=16, face="bold"),
                  legend.key = element_rect(fill = "gray80", linetype=0),
                  legend.background = element_rect(fill = "gray80", linetype=0),
                  strip.text = element_text(face="bold", size = 20),
                  axis.title.y = element_text(face="bold", size = 20),
                  axis.title.x = element_text(face="bold", size = 20, vjust=-2))

Plt.all

Detection Site Effect (Full Model)

PhySig.df2 = as.data.frame(JModel.full$summary.random$Site)

names(PhySig.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975", "mode", "kld")

PhySig.df2$PSigL = ifelse(PhySig.df2$Q025>0 & PhySig.df2$Q975>0, 1, 0) #95% interval entirely above zero
PhySig.df2$PSigH = ifelse(PhySig.df2$Q025<0 & PhySig.df2$Q975<0, 1, 0) #95% interval entirely below zero

PhySig.df3 = PhySig.df2 %>%
              filter(PSigL == 1 | PSigH == 1)

RM.m2 = ggplot(PhySig.df2, aes(x=ID, y=Mean)) + 
                geom_point(size=2, pch=1, col = "gray75") +
                geom_linerange(aes(ymin=Q025, ymax=Q975), colour="gray75") +
                geom_point(data=PhySig.df3, aes(x=ID, y=Mean), 
                           size=2, pch=19, col = "red") +
                geom_linerange(data=PhySig.df3, aes(ymin=Q025, ymax=Q975), colour="black") +
                geom_hline(yintercept = 0, 
                            linetype = "dotted",
                               colour = "red",
                               size = 0.75) +
                    theme_classic() +
                           xlab("Site Location") +
                           ylab("Detection Probabilty (logit)") + 
                           theme(axis.title.y = element_text(face="bold", size=14),
                                 axis.title.x = element_text(face="bold", size=14),
                                 axis.text.y = element_text(face="bold", size=12),
                                 axis.ticks.x=element_blank(),
                                 axis.text.x = element_blank())

RM.m2

Compare Spatial Fields

Pred.pnts = Grd.pnts  #Dense grid created during pre-processing
ModResult = JModel.full #Full model
ModResult2 = JModel.comb #Combined model
ModResult3 = JModel.base #Base model

pLoc = cbind(Pred.pnts@coords[,1], Pred.pnts@coords[,2]) #coords
Ap = inla.spde.make.A(mesh, loc=pLoc) #Relate to mesh

Pred.pnts$Full = drop(Ap %*% ModResult$summary.random$field.env$mean) #project posterior mean field to the grid

Pred.pnts$Comb = drop(Ap %*% ModResult2$summary.random$field.env$mean) 

Pred.pnts$BaseEnv = drop(Ap %*% ModResult3$summary.random$field.base$mean) #Get random field by location

#Create rasters
Full.rf.r = rasterize(spTransform(
                      Pred.pnts,
                      CRS(proj4string(Domain.r))), 
                      Domain.r, 
                      "Full", 
                      background = NA)

Comb.rf.r = rasterize(spTransform(
                      Pred.pnts,
                      CRS(proj4string(Domain.r))), 
                      Domain.r, 
                      "Comb", 
                      background = NA)

Base.rf.r = rasterize(spTransform(
                      Pred.pnts,
                      CRS(proj4string(Domain.r))), 
                      Domain.r, 
                      "BaseEnv", 
                      background = NA)



RF.stk = stack(Base.rf.r, Comb.rf.r, Full.rf.r)
names(RF.stk) = c("Base", "Combined", "Full")


rng = seq(-1.4, 5.7, 0.1)

mCols = brewer.pal(11, "RdYlGn")
cr = rev(colorRampPalette(mCols)(n = 500))

cr = colorRampPalette(cr,  
          bias = 2.4, space = "rgb")

levelplot(RF.stk, 
          layout = c(3,1),
          margin = FALSE,
          xlab = NULL, 
          ylab = NULL, 
          names.attr = c("Base", "Combined", "Full"),
          maxpixels = 1e5,
          col.regions = cr, at = rng,
          colorkey = list(labels=list(cex=1.5),
                          space = "bottom"),
          par.strip.text = list(fontface='bold', cex=1.5),
          par.settings = list(axis.line = list(col = "black"),
                              strip.background = list(col = 'transparent'), 
                              strip.border = list(col = 'transparent')),
          scales = list(cex = 1.25)) + 
  latticeExtra::layer(sp.polygons(Domain, col = "black", lwd = 0.5)) +
  #latticeExtra::layer(sp.polygons(Hare.pnt , col = "black", pch=2, cex = 0.25)) +
  #latticeExtra::layer(sp.text(MyMatrix, txt = Domain$Name, 
  #                            pos =c(1,3,3,2,2,1,1), 
  #                            col="black",font=list(face="bold"), cex=1)) +
  latticeExtra::layer({SpatialPolygonsRescale(layout.north.arrow(),
                                              offset = c(-83, 45.5),
                                              scale = 2)})

Get Predictions

Full Model

Predictions based on the summed covariate effects only (spatial fields and intercepts excluded).

ModResult = JModel.full

#Update Region ID for prediction locations (MI and WI only)
MI_WI_Domain = subset(Domain, Name == "Michigan" | Name == "Wisconsin")

Pred.pnts = Grd.pnts

MI_WI_DomainP = spTransform(MI_WI_Domain, proj4string(Pred.pnts))

Pred.pnts$nDom = over(Pred.pnts, MI_WI_DomainP)[,1]

Pred.pntsN = subset(Pred.pnts, is.na(nDom) == FALSE)

Pred.pntsN$Region = ifelse(Pred.pntsN@data$nDom == 21 & Pred.pntsN@data$UP == "FALSE", "Upper", #FALSE indicates UP
                        ifelse(Pred.pntsN@data$nDom == 21 & Pred.pntsN@data$UP == "TRUE", "Lower",
                           ifelse(Pred.pntsN@data$nDom == 48, "Wisconsin", NA)))


#Get RF
pLoc = cbind(Pred.pntsN@coords[,1], Pred.pntsN@coords[,2])
Ap = inla.spde.make.A(mesh, loc=pLoc)

Pred.pntsN$Full.rf = drop(Ap %*% ModResult$summary.random$field.env$mean) 


#Get Temperature
Comb.plt.df = as.data.frame(JModel.full$summary.random$mMxTemp)[,1:6] #Get effect estimates (Full model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     #Clean labels
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 3501) 

#Region-specific estimates
UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")

UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

#Look up UP Temp Estimates
Pred.pntsN$UP.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - UP.lu$ID)))
Pred.pntsN$UP.TmpEst = as.numeric(with(UP.lu,
                              Mean[match(Pred.pntsN$UP.temp, 
                                                       UP.POS)]))

#Look up Lower Temp Estimates
Pred.pntsN$LW.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Pred.pntsN$LW.temp, 
                                                       LW.POS)]))


#Look up WI Temp Estimates
Pred.pntsN$WI.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Pred.pntsN$WI.temp, 
                                                       WI.POS)]))

#Match to region
Pred.pntsN$Temp.Full = ifelse(Pred.pntsN$Region == "Upper", Pred.pntsN$UP.TmpEst,
                            ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,  
                               ifelse(Pred.pntsN$Region == "Wisconsin", Pred.pntsN$WI.TmpEst, NA)))
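#The three look-up blocks in this chunk repeat one pattern: match each
#covariate value to its nearest rw1 knot and read off the posterior mean.
#A small helper (hypothetical, not part of the original script) that
#reproduces the logic above:
lookup_effect = function(x, lu) {
  pos = sapply(x, function(v) which.min(abs(v - lu$ID))) #nearest knot
  lu$Mean[pos] #posterior mean at that knot
}
#e.g., Pred.pntsN$UP.TmpEst = lookup_effect(Pred.pntsN$mxTempE, UP.lu)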




#Get Snow Week (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.full$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 90) 

UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")

UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

Pred.pntsN$UP.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - UP.lu$ID)))
Pred.pntsN$UP.TmpEst = as.numeric(with(UP.lu,
                              Mean[match(Pred.pntsN$UP.temp, 
                                                       UP.POS)]))

Pred.pntsN$LW.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Pred.pntsN$LW.temp, 
                                                       LW.POS)]))


Pred.pntsN$WI.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Pred.pntsN$WI.temp, 
                                                       WI.POS)]))

#Match to region
Pred.pntsN$Snow.Full = ifelse(Pred.pntsN$Region == "Upper", Pred.pntsN$UP.TmpEst,
                            ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,  
                               ifelse(Pred.pntsN$Region == "Wisconsin", Pred.pntsN$WI.TmpEst, NA)))



#Get Forest (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.full$summary.random$Forest)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 110)

UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")

UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

Pred.pntsN$UP.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - UP.lu$ID)))
Pred.pntsN$UP.TmpEst = as.numeric(with(UP.lu,
                              Mean[match(Pred.pntsN$UP.temp, 
                                                       UP.POS)]))

Pred.pntsN$LW.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Pred.pntsN$LW.temp, 
                                                       LW.POS)]))


Pred.pntsN$WI.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Pred.pntsN$WI.temp, 
                                                       WI.POS)]))

#Match to region
Pred.pntsN$Forest.Full = ifelse(Pred.pntsN$Region == "Upper", Pred.pntsN$UP.TmpEst,
                            ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,  
                               ifelse(Pred.pntsN$Region == "Wisconsin", Pred.pntsN$WI.TmpEst, NA)))


Pred.pntsN@data = Pred.pntsN@data %>%
                       mutate(Pred.Full = plogis(Temp.Full + Snow.Full + Forest.Full)) #inverse-logit of the summed effects



Full.pred.r = rasterize(spTransform(
                      Pred.pntsN,
                      CRS(proj4string(Domain.r))), 
                      Domain.r, 
                      "Pred.Full", 
                      background = NA)

Combined Model Predictions

ModResult = JModel.comb

Pred.pnts = Pred.pntsN

#Get RF
pLoc = cbind(Pred.pntsN@coords[,1], Pred.pntsN@coords[,2])
Ap = inla.spde.make.A(mesh, loc=pLoc)

Pred.pntsN$Comb.rf = drop(Ap %*% ModResult$summary.random$field.env$mean) 


#Get Temperature
Comb.plt.df = as.data.frame(JModel.comb$summary.random$mMxTemp)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     

#Look up Temp Estimates (combined model, single curve)
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Pred.pntsN$UP.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Pred.pntsN$cmb.TmpEst = as.numeric(with(Comb.plt.df,
                              Mean[match(Pred.pntsN$UP.temp, 
                                                       Comb.POS)]))

#Get Snow
Comb.plt.df = as.data.frame(JModel.comb$summary.random$mSnw5yr)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     

#Look up Snow Estimates (combined model)
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Pred.pntsN$UP.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Pred.pntsN$Cmb.SnwEst = as.numeric(with(Comb.plt.df,
                              Mean[match(Pred.pntsN$UP.temp, 
                                                       Comb.POS)]))


#Get Forest
Comb.plt.df = as.data.frame(JModel.comb$summary.random$Forest)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     

#Look up Forest Estimates (combined model)
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Pred.pntsN$UP.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Pred.pntsN$Cmb.ForEst = as.numeric(with(Comb.plt.df,
                              Mean[match(Pred.pntsN$UP.temp, 
                                                       Comb.POS)]))

Pred.pntsN@data = Pred.pntsN@data %>%
                       mutate(Pred.Comb = plogis(cmb.TmpEst + Cmb.SnwEst + Cmb.ForEst))



Comb.pred.r = rasterize(spTransform(
                      Pred.pntsN,
                      CRS(proj4string(Domain.r))), 
                      Domain.r, 
                      "Pred.Comb", 
                      background = NA)

Compare Predictions

Pred.stk = stack(Comb.pred.r, Full.pred.r)
names(Pred.stk) = c("Combined", "Full")

rng = seq(0, 1, 0.01)

mCols = brewer.pal(9, "YlOrBr")
cr = colorRampPalette(mCols)(n = 500)

cr = colorRampPalette(cr,  
          bias = 1, space = "rgb")

levelplot(Pred.stk, #Trial.comp,
          layout = c(2,1),
          margin = FALSE,
          xlab = NULL, 
          ylab = NULL, 
          #main = "Occurrence Probabilty",
          maxpixels = 1e5,
          col.regions = cr, at = rng,
          colorkey = list(labels=list(cex=1.5),
                          space = "bottom"),
          par.strip.text = list(fontface='bold', cex=1.5),
          par.settings = list(axis.line = list(col = "black"),
                              strip.background = list(col = 'transparent'), 
                              strip.border = list(col = 'transparent')),
          scales = list(cex = 1.25)) + 
  latticeExtra::layer(sp.polygons(Domain, col = "black", lwd = 1)) +
  #latticeExtra::layer(sp.polygons(Hare.pnt , col = "black", pch=2, cex = 0.25)) +
  latticeExtra::layer({SpatialPolygonsRescale(layout.north.arrow(),
                                              offset = c(-83, 45.5),
                                              scale = 2)})

Compare Model Metrics

Prediction for Sample Locations

Covariate-based predictions at sites with at least one detection are scored against the observed detection proportion (Counts/Trials) using mean squared error (the MSE reported below).

Obs.set = subset(HareMesh.mod, OBS == 1)

#Update Region ID for prediction locations (MI and WI only)
MI_WI_Domain = subset(Domain, Name == "Michigan" | Name == "Wisconsin")

Obs.set$nDom = over(Obs.set, MI_WI_DomainP)[,1]

Obs.set$Region = ifelse(Obs.set@data$nDom == 21 & Obs.set@data$UP == "FALSE", "Upper", #FALSE indicates UP
                        ifelse(Obs.set@data$nDom == 21 & Obs.set@data$UP == "TRUE", "Lower",
                           ifelse(Obs.set@data$nDom == 48, "Wisconsin", NA)))


Obs.set = Obs.set@data

dim(Obs.set)
## [1] 129  18
sum(Obs.set$OBS)
## [1] 129
Obs.set %>%
  group_by(Region) %>%
  summarise(Count = length(Region))
## # A tibble: 3 x 2
##   Region    Count
##   <chr>     <int>
## 1 Lower        38
## 2 Upper        38
## 3 Wisconsin    53
#Get Temperature
Comb.plt.df = as.data.frame(JModel.full$summary.random$mMxTemp)[,1:6] #Get effect estimates (Full model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     #Clean labels
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 3501) 

#Region-specific estimates
UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")

UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

#Look up UP Temp Estimates
Obs.set$UP.temp = sapply(Obs.set$mxTempE, function(x)which.min(abs(x - UP.lu$ID)))
Obs.set$UP.TmpEst = as.numeric(with(UP.lu,
                              Mean[match(Obs.set$UP.temp, 
                                                       UP.POS)]))

#Look up Lower Temp Estimates
Obs.set$LW.temp = sapply(Obs.set$mxTempE, function(x)which.min(abs(x - LW.lu$ID)))
Obs.set$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Obs.set$LW.temp, 
                                                       LW.POS)]))


#Look up WI Temp Estimates
Obs.set$WI.temp = sapply(Obs.set$mxTempE, function(x)which.min(abs(x - WI.lu$ID)))
Obs.set$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Obs.set$WI.temp, 
                                                       WI.POS)]))

#Match to region
Obs.set$Temp.Full = ifelse(Obs.set$Region == "Upper", Obs.set$UP.TmpEst,
                            ifelse(Obs.set$Region == "Lower", Obs.set$LW.TmpEst,  
                               ifelse(Obs.set$Region == "Wisconsin", Obs.set$WI.TmpEst, NA)))




#Get Snow Week (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.full$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 90) 

UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")

UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

Obs.set$UP.temp = sapply(Obs.set$mSnow5yrE, function(x)which.min(abs(x - UP.lu$ID)))
Obs.set$UP.TmpEst = as.numeric(with(UP.lu,
                              Mean[match(Obs.set$UP.temp, 
                                                       UP.POS)]))

Obs.set$LW.temp = sapply(Obs.set$mSnow5yrE, function(x)which.min(abs(x - LW.lu$ID)))
Obs.set$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Obs.set$LW.temp, 
                                                       LW.POS)]))


Obs.set$WI.temp = sapply(Obs.set$mSnow5yrE, function(x)which.min(abs(x - WI.lu$ID)))
Obs.set$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Obs.set$WI.temp, 
                                                       WI.POS)]))

#Match to region
Obs.set$Snow.Full = ifelse(Obs.set$Region == "Upper", Obs.set$UP.TmpEst,
                            ifelse(Obs.set$Region == "Lower", Obs.set$LW.TmpEst,  
                               ifelse(Obs.set$Region == "Wisconsin", Obs.set$WI.TmpEst, NA)))



#Get Forest (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.full$summary.random$Forest)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 110)

UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")

UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]

Obs.set$UP.temp = sapply(Obs.set$Forest1kmE, function(x)which.min(abs(x - UP.lu$ID)))
Obs.set$UP.TmpEst = as.numeric(with(UP.lu,
                              Mean[match(Obs.set$UP.temp, 
                                                       UP.POS)]))

Obs.set$LW.temp = sapply(Obs.set$Forest1kmE, function(x)which.min(abs(x - LW.lu$ID)))
Obs.set$LW.TmpEst = as.numeric(with(LW.lu,
                              Mean[match(Obs.set$LW.temp, 
                                                       LW.POS)]))


Obs.set$WI.temp = sapply(Obs.set$Forest1kmE, function(x)which.min(abs(x - WI.lu$ID)))
Obs.set$WI.TmpEst = as.numeric(with(WI.lu,
                              Mean[match(Obs.set$WI.temp, 
                                                       WI.POS)]))

#Match to region
Obs.set$Forest.Full = ifelse(Obs.set$Region == "Upper", Obs.set$UP.TmpEst,
                            ifelse(Obs.set$Region == "Lower", Obs.set$LW.TmpEst,  
                               ifelse(Obs.set$Region == "Wisconsin", Obs.set$WI.TmpEst, NA)))


Obs.set = Obs.set %>%
          mutate(Pred.Full = plogis(Temp.Full + Snow.Full + Forest.Full))


Obs.set$Prop = Obs.set$Counts/Obs.set$Trials

Full.BS = mean((Obs.set$Pred.Full - Obs.set$Prop)^2) #MSE against observed detection proportion
Full.BS
## [1] 0.3470399
#Combined Model
#Get Temperature
Comb.plt.df = as.data.frame(JModel.comb$summary.random$mMxTemp)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     

#Look up Temp Estimates (combined model)
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Obs.set$UP.temp = sapply(Obs.set$mxTempE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Obs.set$cmb.TmpEst = as.numeric(with(Comb.plt.df,
                              Mean[match(Obs.set$UP.temp, 
                                                       Comb.POS)]))

#Get Snow
Comb.plt.df = as.data.frame(JModel.comb$summary.random$mSnw5yr)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     

#Look up Snow Estimates (combined model)
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Obs.set$UP.temp = sapply(Obs.set$mSnow5yrE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Obs.set$Cmb.SnwEst = as.numeric(with(Comb.plt.df,
                              Mean[match(Obs.set$UP.temp, 
                                                       Comb.POS)]))


#Get Forest
Comb.plt.df = as.data.frame(JModel.comb$summary.random$Forest)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")     

#Look up Forest Estimates (combined model)
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Obs.set$UP.temp = sapply(Obs.set$Forest1kmE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Obs.set$Cmb.ForEst = as.numeric(with(Comb.plt.df,
                              Mean[match(Obs.set$UP.temp, 
                                                       Comb.POS)]))


Obs.set = Obs.set %>%
          mutate(Pred.Comb = plogis(cmb.TmpEst + Cmb.SnwEst + Cmb.ForEst))

Comb.BS = mean((Obs.set$Pred.Comb - Obs.set$Prop)^2)
Comb.BS
## [1] 0.3537512

Model Compare

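GetMets() is a helper defined earlier in the workflow (not shown in this section); from how it is indexed below, it returns DIC (row 1) and WAIC (row 2) split into detection (column 2) and environment (column 3) levels. A sketch of what such a helper might look like, using INLA’s per-observation DIC/WAIC contributions (an assumption, not the original definition):

#Hypothetical reconstruction of GetMets(): sums local DIC/WAIC by model level
GetMets = function(mod) {
  det.idx = inla.stack.index(Joint.stk, tag = "jdet.0")$data
  env.idx = inla.stack.index(Joint.stk, tag = "jenv.0")$data
  data.frame(Metric = c("DIC", "WAIC"),
             Detection = c(sum(mod$dic$local.dic[det.idx], na.rm = TRUE),
                           sum(mod$waic$local.waic[det.idx], na.rm = TRUE)),
             Environment = c(sum(mod$dic$local.dic[env.idx], na.rm = TRUE),
                             sum(mod$waic$local.waic[env.idx], na.rm = TRUE)))
}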
#Environment Level
Compare.tab = as.data.frame(matrix(ncol = 4, nrow= 2))
names(Compare.tab) = c("Model", "DIC", "WAIC", "MSE")

Compare.tab[1, 1] = "Combined"
Compare.tab[1, 2] = round(GetMets(JModel.comb)[1,3],2)
Compare.tab[1, 3] = round(GetMets(JModel.comb)[2,3],2)
Compare.tab[1, 4] = round(Comb.BS, 3)

Compare.tab[2, 1] = "Full"
Compare.tab[2, 2] = round(GetMets(JModel.full)[1,3],2)
Compare.tab[2, 3] = round(GetMets(JModel.full)[2,3],2)
Compare.tab[2, 4] = round(Full.BS, 3)

#Detection Level
Compare.tab1 = as.data.frame(matrix(ncol = 3, nrow= 2))
names(Compare.tab1) = c("Model", "DIC", "WAIC")

Compare.tab1[1, 1] = "Combined"
Compare.tab1[1, 2] = round(GetMets(JModel.comb)[1,2],2)
Compare.tab1[1, 3] = round(GetMets(JModel.comb)[2,2],2)

Compare.tab1[2, 1] = "Full"
Compare.tab1[2, 2] = round(GetMets(JModel.full)[1,2],2)
Compare.tab1[2, 3] = round(GetMets(JModel.full)[2,2],2)
kable(Compare.tab, caption = "Environment Level") %>%
      kable_styling("striped", full_width = F) %>%
      row_spec(0, font_size = 20) %>%
      column_spec(1, bold = T)
Environment Level

Model      DIC      WAIC     MSE
Combined   3494.14  3735.14  0.354
Full       1207.91  1094.29  0.347
kable(Compare.tab1, caption = "Detection Level") %>%
      kable_styling("striped", full_width = F) %>%
      row_spec(0, font_size = 20) %>%
      column_spec(1, bold = T)
Detection Level

Model      DIC     WAIC
Combined   859.43  1740.38
Full       841.51  1191.30