Load and Prep Observation Data
Michigan
Loading raw Michigan data, dropping previously acquired environmental covariates and summing trial successes.
MI.df = read.csv("./Data_102418/mi_data10-24-18.csv",
header = TRUE, stringsAsFactors = FALSE, sep=",")
MI.df = MI.df %>%
mutate(Long = Lat, #these appear reversed in original data
Lat = Lon,
State = "Michigan", #transect state
Spp = "Hare",
OBS = Hare, #overall presence (at any trial)
Counts = T.1 + T.2 + T.3 + T.4 + T.5 + T.6 + T.7 + T.8 + T.9, #Sum across trials
Trials = 9, #No NAs in this dataset; all sites have 9 trials
Data = "Fit",
Site2 = paste("M", 1:nrow(MI.df), sep=".")) %>%
select(Site2, Long, Lat, State, OBS, Counts, Trials, Data, Spp)
dim(MI.df) #Check data
## [1] 125 9
head(MI.df)
## Site2 Long Lat State OBS Counts Trials Data Spp
## 1 M.1 -88.24388 47.41681 Michigan 1 6 9 Fit Hare
## 2 M.2 -84.58309 45.53829 Michigan 1 6 9 Fit Hare
## 3 M.3 -84.14892 45.46854 Michigan 1 6 9 Fit Hare
## 4 M.4 -90.07851 46.35078 Michigan 1 1 9 Fit Hare
## 5 M.5 -86.56820 46.37911 Michigan 1 1 9 Fit Hare
## 6 M.6 -86.75846 46.25573 Michigan 1 9 9 Fit Hare
Wisconsin
Loading data as above, but trials vary by transect, so successes are summed individually.
WI.df = read.csv("./Data_102418/wi_data10-24-18.csv",
header = TRUE, stringsAsFactors = FALSE, sep=",")
WI.Trial.df = WI.df[2:21] #pull out trial columns
WI.Trial.df[WI.Trial.df >= 1] = 1 #set track counts to 1
WI.Trial.df$Counts = rowSums(WI.Trial.df, na.rm=T) #sum total successes by transect
WI.Trial.df$Trials = rowSums(is.na(WI.df[2:21])==FALSE) #count number of trials
WI.df = WI.df %>% #Basically the same as MI above
mutate(Long = Lat,
Lat = Lon,
State = "Wisconsin",
OBS = Hare,
Spp = "Hare",
Data = "Fit",
Counts = WI.Trial.df$Counts, #Sum across trials
Trials = WI.Trial.df$Trials, #trials vary by transect
Site2 = paste("W", 1:nrow(WI.df), sep=".")) %>% #Site identifier
select(Site2, Long, Lat, State, OBS, Counts, Trials, Data, Spp)
dim(WI.df)
## [1] 195 9
head(WI.df)
## Site2 Long Lat State OBS Counts Trials Data Spp
## 1 W.1 -90.72816 44.36486 Wisconsin 1 0 8 Fit Hare
## 2 W.2 -90.84052 44.27223 Wisconsin 0 0 6 Fit Hare
## 3 W.3 -90.82058 44.45853 Wisconsin 1 5 8 Fit Hare
## 4 W.4 -92.11831 45.44736 Wisconsin 0 0 6 Fit Hare
## 5 W.5 -91.22783 45.02406 Wisconsin 0 0 6 Fit Hare
## 6 W.6 -90.01462 44.21396 Wisconsin 0 0 8 Fit Hare
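Because Counts and Trials feed binomial likelihoods later, a quick sanity check (an addition, not in the original script) confirms successes never exceed trials in either state:
stopifnot(all(MI.df$Counts <= MI.df$Trials),
all(WI.df$Counts <= WI.df$Trials))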
Combine & Convert to Points
Fit.df = rbind(MI.df, WI.df)
Fit.pnt = SpatialPointsDataFrame(Fit.df[, c("Long","Lat")], Fit.df)
proj4string(Fit.pnt) = "+proj=longlat +datum=NAD83 +no_defs +ellps=GRS80 +towgs84=0,0,0"
Defining the domain extent, downloading state boundaries, and then creating raster versions for later plotting. Note that the “Lakes” file is used to better identify lake boundaries. A copy of this shapefile is in the folder with this script; the path to the shapefile will need to be updated in this chunk.
World = map("world",
fill = TRUE,
plot = FALSE)
IDs = sapply(strsplit(World$names, ":"), function(x) x[1])
LL84 = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
WorldP = map2SpatialPolygons(World, IDs = IDs,
proj4string = CRS(projection(LL84)))
#Add a dataframe
pid = sapply(slot(WorldP, "polygons"),
function(x) slot(x, "ID"))
p.df = data.frame( ID=1:length(WorldP),
row.names = pid)
World = SpatialPolygonsDataFrame(WorldP, p.df)
World = spTransform(World, LL84)
World = gBuffer(World, width = 0, byid = F)
#States
States = map("state",
fill = TRUE,
plot = FALSE)
IDs = sapply(strsplit(States$names, ":"), function(x) x[1])
LL84 = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
StatesP = map2SpatialPolygons(States, IDs = IDs,
proj4string = CRS(projection(LL84)))
#Add a dataframe
pid = sapply(slot(StatesP, "polygons"),
function(x) slot(x, "ID"))
p.df = data.frame( ID=1:length(StatesP),
row.names = pid)
States = SpatialPolygonsDataFrame(StatesP, p.df)
States = spTransform(States, LL84)
Lakes = readOGR(dsn = "C:/Users/humph173/Documents/Michigan_State/Marten/ArcWork/Lakes",
layer = "Lake_2ks",
stringsAsFactors = FALSE)
## OGR data source with driver: ESRI Shapefile
## Source: "C:\Users\humph173\Documents\Michigan_State\Marten\ArcWork\Lakes", layer: "Lake_2ks"
## with 1 features
## It has 15 fields
## Integer64 fields read as strings: OBJECTID Id area InPoly_FID SimPgnFlag
LakesLL = spTransform(Lakes, proj4string(States))
Ext1 = c(-93.173415, -81.932841, 41.217132, 47.740649)
Ext2 = c(-93.173415, -81.932841, 43.4, 47.740649)
States_map = crop(States, Ext1)
MyStates = subset(States, ID == 21 | ID == 48)
Domain = crop(MyStates, Ext2)
Domain$Name = str_cap_words(rownames(Domain@data))
States_map$Name = str_cap_words(rownames(States_map@data))
Water0 = as(extent(Domain), "SpatialPolygons")
p.df = data.frame(ID=1:length(Water0))
Water0 = SpatialPolygonsDataFrame(Water0, p.df, match.ID = F)
proj4string(Water0) = proj4string(Domain)
Water1 = gDifference(Water0, spTransform(Lakes, proj4string(Water0)))
DomLLU = gUnaryUnion(Domain)
#Rasterized version
Ras = raster(res = 0.02, ext = extent(DomLLU),
crs = proj4string(DomLLU))
Domain.r = rasterize(DomLLU, Ras,
field = 0,
background = NA)
Ras2 = raster(res = 0.02, ext = extent(States_map),
crs = proj4string(States_map))
States.r = rasterize(States_map, Ras2,
field = 0,
background = NA)
#Dense for hi-res prediction
Zext = as(raster::extent(-92, -91, 45, 46), "SpatialPolygons")
proj4string(Zext) = proj4string(States)
Z_samp = disaggregate(crop(Domain.r, Zext), fact=6)
zPred.pnts = rasterToPoints(Z_samp, sp=T)
zPred.pnts@data = zPred.pnts@data %>%
mutate(Long = zPred.pnts@coords[,1],
Lat = zPred.pnts@coords[,2],
Spp = "Zoom") %>%
select(-layer)
#Point grid version (statewide prediction)
Grd.pnt = rasterToPoints(Domain.r, spatial = TRUE)
Grd.pnt@data = Grd.pnt@data %>%
mutate(Long = Grd.pnt@coords[,1],
Lat = Grd.pnt@coords[,2],
Spp = "Grid") %>%
select(-layer)
Grd.pnt.df = rbind(Grd.pnt@data, zPred.pnts@data)
Grd.pnt = SpatialPointsDataFrame(Grd.pnt.df[,c("Long","Lat")], Grd.pnt.df)
proj4string(Grd.pnt) = proj4string(States)
Quick Plot to Check Data
Locations used to fit the model.
rng = seq(0, 255, 1)
mCols = brewer.pal(11, "RdYlBu")[-6]
cr0 = rev(colorRampPalette((mCols))(n = 256))
cr = colorRampPalette(c("tan", cr0),
bias = 1, space = "rgb")
MyMatrix = matrix(nrow=2, ncol=2)
rownames(MyMatrix) = c("michigan", "wisconsin")
MyMatrix[,1] = c(-84.46125, -89.51171)
MyMatrix[,2] = c(43.61285, 43.63285)
Hare.LLpnt = spTransform(Fit.pnt, proj4string(States))
levelplot(States.r,
margin = FALSE,
xlab = NULL,
ylab = NULL,
maxpixels = 1e5,
col.regions = cr, at = rng,
colorkey = FALSE, par.strip.text = list(fontface='bold', cex=1.5),
par.settings = list(axis.line = list(col = "black"),
strip.background = list(col = 'transparent'),
strip.border = list(col = 'transparent')),
scales = list(cex = 1.25)) +
latticeExtra::layer(sp.polygons(States_map, col = "black", lwd = 0.5)) +
latticeExtra::layer(sp.polygons(Domain, col = "black", fill = "lightgray", lwd = 2)) +
latticeExtra::layer(sp.polygons(LakesLL, fill = "lightblue", col = "transparent", lwd = 0.5)) +
latticeExtra::layer(sp.polygons(Hare.LLpnt , col = "red", pch=factor(Hare.LLpnt$OBS), cex = 1)) +
latticeExtra::layer(sp.text(MyMatrix, txt = Domain$Name,
pos =c(1,3,3,2,2,1,1),
col="black",font=list(face="bold"), cex=1)) +
latticeExtra::layer({SpatialPolygonsRescale(layout.north.arrow(),
offset = c(-83, 45.5),
scale = 2)})
Project Everything to KM
nProj = "+proj=utm +zone=16 +datum=NAD83 +units=km +no_defs +ellps=GRS80 +towgs84=0,0,0"
LakesKM = spTransform(Lakes, nProj)
FocalKM = spTransform(Water1, nProj)
DomP = spTransform(Domain, nProj)
DomPU = gUnaryUnion(DomP)
Fit.pntP = spTransform(Fit.pnt, nProj)
Fit.pntPX = subset(Fit.pntP, Spp == "Hare" & Data == "Fit")
Grd.pntP = spTransform(Grd.pnt, nProj)
These values are scaled to match the units of the geographic projection (kilometers).
max.edge = 5 #Make the outer edge length 5km
bound.outer = 75 #Outer extension can be 75km
bdry = inla.sp2segment(DomPU) #Formatting boundary for r-INLA
mesh = inla.mesh.2d(boundary = bdry, #Boundary
loc = Fit.pntPX, #Fit to point locations
max.edge = c(1, 5)*max.edge, #mesh size specifications
cutoff = 5.5,
min.angle = 25,
offset = c(max.edge, bound.outer))
mesh$n #number of nodes
## [1] 6073
plot(mesh, lwd=0.5)
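As an added check (not part of the original script), the realized triangle edge lengths can be compared against the max.edge target; this assumes the standard INLA mesh structure (mesh$loc, mesh$graph$tv), which the barrier step below also uses.
tv = mesh$graph$tv #triangle vertex indices
edge.len = function(a, b) sqrt(rowSums((mesh$loc[a, 1:2] - mesh$loc[b, 1:2])^2))
all.edges = c(edge.len(tv[,1], tv[,2]),
edge.len(tv[,2], tv[,3]),
edge.len(tv[,3], tv[,1]))
summary(all.edges) #interior edges should sit near the 5 km target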
Define Spatial Barriers
Article (Bakka 2019): https://www.sciencedirect.com/science/article/pii/S221167531830099X
Tutorial: https://haakonbakka.bitbucket.io/btopic107.html
tl = length(mesh$graph$tv[,1])
posTri = matrix(0, tl, 2)
for (t in 1:tl){
temp = mesh$loc[mesh$graph$tv[t, ], ]
posTri[t,] = colMeans(temp)[c(1,2)]
}
posTri = SpatialPoints(posTri)
proj4string(posTri) = proj4string(FocalKM)
normal = over(DomPU, posTri, returnList=T)
normal = unlist(normal)
barrier.triangles = setdiff(1:tl, normal)
poly.barrier = inla.barrier.polygon(mesh, barrier.triangles)
Plot Mesh with Barriers
Red areas are barriers.
plot(mesh, main="Mesh")
plot(poly.barrier, add=T, col='red')
plot(mesh, add=T)
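A quick count of the flagged triangles (an added check, not in the original) confirms the barrier set is non-empty and complements the land set:
length(barrier.triangles) #triangles flagged as lake barrier
length(normal) #triangles in the normal (land) domain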
Wisconsin
WIV.df = read.csv("./WI_HareValid2000-18.csv",
header = TRUE, stringsAsFactors = FALSE, sep=",")
WIV.df = WIV.df %>%
select(X, Y, HARESA_12_, HARESA_13_, HARESA_14_)
WIV.df[is.na(WIV.df)] = 0
WIV.df = WIV.df %>%
mutate(Site2 = paste("WIV_", 1:dim(WIV.df)[1]),
Long = X,
Lat = Y,
Source = "WI",
Spp = "Valid",
State = "Wisconsin",
Data = "Valid",
Counts = (HARESA_12_ + HARESA_13_ + HARESA_14_),
OBS = ifelse(Counts >= 1, 1, 0),
Trials = 1) %>%
select(Site2, Long, Lat, State, OBS, Counts, Trials, Data, Spp, Source)
dim(WIV.df)
## [1] 51 10
head(WIV.df)
## Site2 Long Lat State OBS Counts Trials Data Spp Source
## 1 WIV_ 1 459786.6 640333.1 Wisconsin 0 0 1 Valid Valid WI
## 2 WIV_ 2 459119.3 623022.5 Wisconsin 0 0 1 Valid Valid WI
## 3 WIV_ 3 448003.0 645595.7 Wisconsin 1 20 1 Valid Valid WI
## 4 WIV_ 4 544112.4 409977.9 Wisconsin 1 40 1 Valid Valid WI
## 5 WIV_ 5 429676.8 692700.4 Wisconsin 1 16 1 Valid Valid WI
## 6 WIV_ 6 323853.0 603638.8 Wisconsin 1 58 1 Valid Valid WI
#NAD_1983_HARN_Wisconsin_TM
WIval.nproj = "+proj=tmerc +lat_0=0 +lon_0=-90 +k=0.9996 +x_0=520000 +y_0=-4480000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
WIv.pnt = SpatialPointsDataFrame(WIV.df[, c("Long","Lat")], WIV.df)
proj4string(WIv.pnt) = WIval.nproj
WIv.pnt = spTransform(WIv.pnt, nProj)
Hare (Sault Tribe)
library("readxl")
files = list.files(path="C:/Users/humph173/Documents/Michigan_State/Eric/Pellets",
pattern="*.xlsx", full.names=T, recursive=FALSE)
for(i in 1:length(files)) {
tmp.ex = as.data.frame(read_excel(files[i]))[,1:2]
names(tmp.ex) = c("X", "Y")
if(i == 1){Pellet.df = tmp.ex}
else{Pellet.df = rbind(Pellet.df, tmp.ex)}
}
## New names:
## * `` -> ...3
## New names:
## * `` -> ...3
STV.df = Pellet.df %>%
mutate(Site2 = paste("STV_", 1:dim(Pellet.df)[1]),
Long = X,
Lat = Y,
Source = "ST",
Spp = "Valid",
State = "Michigan",
Data = "Valid",
Counts = 1,
OBS = 1,
Trials = 1) %>%
select(Site2, Long, Lat, State, OBS, Counts, Trials, Data, Spp, Source)
head(STV.df)
## Site2 Long Lat State OBS Counts Trials Data Spp Source
## 1 STV_ 1 649079.4 5120467 Michigan 1 1 1 Valid Valid ST
## 2 STV_ 2 661239.2 5135238 Michigan 1 1 1 Valid Valid ST
## 3 STV_ 3 661243.7 5135231 Michigan 1 1 1 Valid Valid ST
## 4 STV_ 4 661246.0 5135232 Michigan 1 1 1 Valid Valid ST
## 5 STV_ 5 661253.6 5135214 Michigan 1 1 1 Valid Valid ST
## 6 STV_ 6 661249.1 5135212 Michigan 1 1 1 Valid Valid ST
dim(STV.df)
## [1] 847 10
MnProj = "+proj=utm +zone=16 +datum=NAD83 +units=m +no_defs +ellps=GRS80 +towgs84=0,0,0"
STV.pnt = SpatialPointsDataFrame(STV.df[, c("Long","Lat")], STV.df)
proj4string(STV.pnt) = MnProj
STV.pnt = spTransform(STV.pnt, nProj)
Predator-Prey Data
PPV.df = read.csv("C:/Users/humph173/Documents/Michigan_State/Marten/Data/PredPrey/Pred_prey_Comb_121218.csv",
header = TRUE, sep=",")
PPV.df = PPV.df %>%
mutate(Site2 = paste("PPV_", 1:dim(PPV.df)[1]),
Long = Easting,
Lat = Northing,
Source = "PP",
Spp = "Valid",
State = "Michigan",
Data = "Valid",
Counts = 1,
OBS = 1,
Trials = 1) %>%
select(Site2, Long, Lat, State, OBS, Counts, Trials, Data, Spp, Source)
head(PPV.df)
## Site2 Long Lat State OBS Counts Trials Data Spp Source
## 1 PPV_ 1 399299 5117030 Michigan 1 1 1 Valid Valid PP
## 2 PPV_ 2 399299 5117030 Michigan 1 1 1 Valid Valid PP
## 3 PPV_ 3 411292 5117450 Michigan 1 1 1 Valid Valid PP
## 4 PPV_ 4 400838 5119454 Michigan 1 1 1 Valid Valid PP
## 5 PPV_ 5 400838 5119454 Michigan 1 1 1 Valid Valid PP
## 6 PPV_ 6 396858 5127841 Michigan 1 1 1 Valid Valid PP
dim(PPV.df)
## [1] 25939 10
PPV.pnt = SpatialPointsDataFrame(PPV.df[, c("Long","Lat")], PPV.df)
proj4string(PPV.pnt) = MnProj
PPV.pnt = spTransform(PPV.pnt, nProj)
Lower Survey (Pellet)
LPS.df = read.csv("./2019_pellet_sites.csv",
header = TRUE, sep=",")
LPS.df = LPS.df %>%
mutate(Site2 = paste("PPV_", 1:dim(LPS.df)[1]),
Long = Latitude, #Reversed in data
Lat = Longitude,
Source = "LPP",
Spp = "Valid",
State = "Michigan",
Data = "Valid",
Counts = 1,
OBS = CurrentOcc,
Trials = 1) %>%
filter(plot_ID != "main") %>% #Remove Original Surveys
select(Site2, Long, Lat, State, OBS, Counts, Trials, Data, Spp, Source)
head(LPS.df)
## Site2 Long Lat State OBS Counts Trials Data Spp Source
## 1 PPV_ 1 619035.8 519897.4 Michigan 1 1 1 Valid Valid LPP
## 2 PPV_ 2 617098.3 528769.7 Michigan 1 1 1 Valid Valid LPP
## 3 PPV_ 4 620157.4 513569.1 Michigan 1 1 1 Valid Valid LPP
## 4 PPV_ 5 609769.3 522842.3 Michigan 0 1 1 Valid Valid LPP
## 5 PPV_ 6 608475.4 518285.8 Michigan 1 1 1 Valid Valid LPP
## 6 PPV_ 7 613170.1 511702.0 Michigan 0 1 1 Valid Valid LPP
dim(LPS.df)
## [1] 31 10
#Long & Lat Reversed & not UTM
#NAD_1983_2011_Michigan_GeoRef_Meters
StateGeoref.Proj = "+proj=omerc +lat_0=45.30916666666666 +lonc=-86 +alpha=337.25556 +k=0.9996 +x_0=2546731.496 +y_0=-4354009.816 +no_uoff +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
LPS.pnt = SpatialPointsDataFrame(LPS.df[, c("Long","Lat")], LPS.df)
proj4string(LPS.pnt) =StateGeoref.Proj
LPS.pnt = spTransform(LPS.pnt, nProj)
Update Coordinates and Combine
Fit.pnt.tab = Fit.pntP@data %>%
mutate(Source = "Fit",
Long = Fit.pntP@coords[,1],
Lat = Fit.pntP@coords[,2])
WIv.pnt.tab = WIv.pnt@data %>%
mutate(Long = WIv.pnt@coords[,1],
Lat = WIv.pnt@coords[,2])
PPV.pnt.tab = PPV.pnt@data %>%
mutate(Long = PPV.pnt@coords[,1],
Lat = PPV.pnt@coords[,2])
STV.pnt.tab = STV.pnt@data %>%
mutate(Long = STV.pnt@coords[,1],
Lat = STV.pnt@coords[,2])
LPS.pnt.tab = LPS.pnt@data %>%
mutate(Long = LPS.pnt@coords[,1],
Lat = LPS.pnt@coords[,2])
Combine All Data
Join data into a common dataframe.
hare.df = rbind(Fit.pnt.tab, WIv.pnt.tab, STV.pnt.tab, PPV.pnt.tab, LPS.pnt.tab)
dim(hare.df)
## [1] 27188 10
hare.df %>%
group_by(State, Data) %>%
summarise(Count = length(Data))
## # A tibble: 4 x 3
## # Groups: State [2]
## State Data Count
## <chr> <chr> <int>
## 1 Michigan Fit 125
## 2 Michigan Valid 26817
## 3 Wisconsin Fit 195
## 4 Wisconsin Valid 51
Convert to Points
Hare.pnt = SpatialPointsDataFrame(hare.df[, c("Long","Lat")], hare.df)
proj4string(Hare.pnt) = nProj
Quick Plot to Check Validation Data
Locations used for model validation.
rng = seq(0, 255, 1)
mCols = brewer.pal(11, "RdYlBu")[-6]
cr0 = rev(colorRampPalette((mCols))(n = 256))
cr = colorRampPalette(c("tan", cr0),
bias = 1, space = "rgb")
MyMatrix = matrix(nrow=2, ncol=2)
rownames(MyMatrix) = c("michigan", "wisconsin")
MyMatrix[,1] = c(-84.46125, -89.51171)
MyMatrix[,2] = c(43.61285, 43.63285)
Hare.LLpnt = spTransform(Hare.pnt, proj4string(States))
Hare.LLpnt = subset(Hare.LLpnt, Data == "Valid")
levelplot(States.r,
margin = FALSE,
xlab = NULL,
ylab = NULL,
maxpixels = 1e5,
col.regions = cr, at = rng,
colorkey = FALSE, par.strip.text = list(fontface='bold', cex=1.5),
par.settings = list(axis.line = list(col = "black"),
strip.background = list(col = 'transparent'),
strip.border = list(col = 'transparent')),
scales = list(cex = 1.25)) +
latticeExtra::layer(sp.polygons(States_map, col = "black", lwd = 0.5)) +
latticeExtra::layer(sp.polygons(Domain, fill = "lightgray", lwd = 2)) +
latticeExtra::layer(sp.polygons(LakesLL, fill = "lightblue", col = "transparent", lwd = 0.5)) +
latticeExtra::layer(sp.polygons(Hare.LLpnt, col = "red", pch=factor(Hare.LLpnt$Data), cex = 1)) +
latticeExtra::layer(sp.text(MyMatrix, txt = Domain$Name,
pos =c(1,3,3,2,2,1,1),
col="black",font=list(face="bold"), cex=1)) +
latticeExtra::layer({SpatialPolygonsRescale(layout.north.arrow(),
offset = c(-83, 45.5),
scale = 2)})
Combine Observations, Mesh Nodes, and Grid Points
#Node coordinates
dd = as.data.frame(cbind(mesh$loc[,1],
mesh$loc[,2]))
names(dd) = c("Long", "Lat") #name coordinates
dd$OBS = 0 #no hare at these locations
dd$Site2 = paste("N", 1:nrow(dd), sep = ".") #to match with observation data
dd$State = "All"
dd$Spp = "Mesh"
dd$Counts = 0
dd$Trials = 0
dd$Source = "Mesh"
dd$Data = "Fit"
#Hare Obs
hare.set = Hare.pnt@data %>%
mutate(Long = Hare.pnt@coords[,1],
Lat = Hare.pnt@coords[,2])
#Grid
grid.set = Grd.pntP@data %>%
mutate(Long = Grd.pntP@coords[,1],
Lat = Grd.pntP@coords[,2],
Spp = Spp,
OBS = 0,
State = "All",
Counts = 0,
Trials = 0,
Data = "grid",
Source = "Grid",
Site2 = paste("G", 1:nrow(Grd.pntP@data), sep = "."))
All.pnts = rbind(hare.set, dd, grid.set)
All.pnts = SpatialPointsDataFrame(All.pnts[, c("Long","Lat")], All.pnts)
proj4string(All.pnts) = nProj
Forest1km = raster("C:/Users/humph173/Documents/Michigan_State/Sean/Loop_020819/Forest1km.grd")
mSnow5Yr = raster("./Hare1/Mean5yrSnow.tif")
mxTemp = raster("./Hare1/meanTMAX.tif")
All.pnts$mSnow5yrE = extract(mSnow5Yr,
spTransform(All.pnts,
CRS(proj4string(mSnow5Yr))),
method="simple")
All.pnts$mSnow5yrE[is.na(All.pnts$mSnow5yrE)] = mean(All.pnts$mSnow5yrE, na.rm=T)
All.pnts$mxTempE = extract(mxTemp,
spTransform(All.pnts,
CRS(proj4string(mxTemp))),
method="simple")
All.pnts$mxTempE[is.na(All.pnts$mxTempE)] = mean(All.pnts$mxTempE, na.rm=T)
All.pnts$Forest1kmE = extract(Forest1km,
spTransform(All.pnts,
CRS(proj4string(Forest1km))),
method="simple")
All.pnts$Forest1kmE[is.na(All.pnts$Forest1kmE)] = mean(All.pnts$Forest1kmE, na.rm=T)
All.pnts$Forest1kmE = round(All.pnts$Forest1kmE/100, 1)
# 2019 Lower ###################
mSnow5Yr2019 = raster("./Mean5yrlp2019.tif")
mxTemp2019 = raster("./meanTMAXLP2019.tif")
All.pnts$mSnow5yrE2 = extract(mSnow5Yr2019,
spTransform(All.pnts,
CRS(proj4string(mSnow5Yr2019))),
method="simple")
All.pnts$mSnow5yrE2[is.na(All.pnts$mSnow5yrE2)] = mean(All.pnts$mSnow5yrE2, na.rm=T)
All.pnts$mxTempE2 = extract(mxTemp2019,
spTransform(All.pnts,
CRS(proj4string(mxTemp2019))),
method="simple")
All.pnts$mxTempE2[is.na(All.pnts$mxTempE2)] = mean(All.pnts$mxTempE2, na.rm=T)
#Select Variable Year
All.pnts$mSnow5yrE = ifelse(All.pnts$Source == "LPP", All.pnts$mSnow5yrE2, All.pnts$mSnow5yrE)
All.pnts$mxTempE = ifelse(All.pnts$Source == "LPP", All.pnts$mxTempE2, All.pnts$mxTempE) #assign to mxTempE so the 2019 values survive the cleanup below
#Clean up frame
All.pnts@data = All.pnts@data %>% select(-c(mSnow5yrE2, mxTempE2))
Identify the UP
Identify locations in the U.P. based on well-defined county boundaries.
UP = readOGR(dsn = "C:/Users/humph173/Documents/Michigan_State/SLP_Beam_Diam/Counties_v17a",
layer = "MI_UP",
stringsAsFactors = FALSE)
## OGR data source with driver: ESRI Shapefile
## Source: "C:\Users\humph173\Documents\Michigan_State\SLP_Beam_Diam\Counties_v17a", layer: "MI_UP"
## with 15 features
## It has 15 fields
## Integer64 fields read as strings: OBJECTID FIPSNUM
XX = readOGR(dsn = "C:/Users/humph173/Documents/Michigan_State/Sean/Hare1/CoordTest",
layer = "Test7b",
stringsAsFactors = FALSE)
## OGR data source with driver: ESRI Shapefile
## Source: "C:\Users\humph173\Documents\Michigan_State\Sean\Hare1\CoordTest", layer: "Test7b"
## with 35 features
## It has 13 fields
## Integer64 fields read as strings: FID_1 Site CurrentOcc Site_ID BUFF_DIST
UPp = gUnaryUnion(spTransform(UP, proj4string(All.pnts)))
All.pnts$UP = is.na(over(All.pnts, UPp))
Update State Designation to Include UP
Cleaning up labels.
All.pnts$StateUP = ifelse(All.pnts$UP == FALSE, "Mich.UP", All.pnts$State)
All.pnts$StateUPW = ifelse(All.pnts$StateUP == "Mich.UP", "Wisconsin", All.pnts$StateUP)
All.pnts$Domain = over(All.pnts, DomPU)
levels(factor(All.pnts$StateUP))
## [1] "All" "Mich.UP" "Michigan" "Wisconsin"
levels(factor(All.pnts$StateUPW))
## [1] "All" "Michigan" "Wisconsin"
Separate Datasets
Hare.mod = subset(All.pnts, Spp == "Hare") #Observations only
Mesh.mod = subset(All.pnts, Spp == "Mesh" & is.na(Domain) == FALSE) #Mesh locations excluding buffer extension
HareMesh.mod = subset(All.pnts, Spp != "Grid" & Spp != "Valid") #Observations excluding Grid points for prediction/plotting
HareMesh.mod$ID = 1:nrow(HareMesh.mod@data)
Validation.set = subset(All.pnts, Data == "Valid")
Grd.pnts = subset(All.pnts, Spp == "Grid") #Grid locations for prediction/plotting
zGrd.pnts = subset(All.pnts, Spp == "Zoom") #zoomed locations for prediction/plotting
Add Background Points to Validation Set
Validation.set2 = spRbind(Validation.set, Mesh.mod)
Validation.set2@data %>%
group_by(OBS) %>%
summarise(Cnt =length(OBS))
## # A tibble: 2 x 2
## OBS Cnt
## <dbl> <int>
## 1 0 4864
## 2 1 26857
Validation.set2$dups = duplicated(Validation.set2@data[,c("Long","Lat")])
Validation.set2 = subset(Validation.set2, dups == FALSE)
The condition index needs to be below 30; this suggests collinearity will not be an issue.
library(perturb)
##
## Attaching package: 'perturb'
## The following object is masked from 'package:raster':
##
## reclassify
Colin.df = HareMesh.mod@data %>% #select covariates
select(mSnow5yrE, mxTempE, Forest1kmE)
CorCov = cor(Colin.df) #calculate correlation
corrplot(CorCov) #view correlation table
CI = colldiag(CorCov) #Apply metric
CI
## Condition
## Index Variance Decomposition Proportions
## intercept mSnow5yrE mxTempE Forest1kmE
## 1 1.000 0.086 0.053 0.143 0.092
## 2 1.406 0.523 0.008 0.843 0.011
## 3 3.164 0.391 0.939 0.014 0.897
Define Regions (Three Regions)
Converting each region to an integer value used to internally “replicate” estimates by region.
HR.df = HareMesh.mod@data
levels(factor(HR.df$StateUP)) #Level names
## [1] "All" "Mich.UP" "Michigan" "Wisconsin"
HR.df$StateUPWI = as.integer(as.factor(HR.df$StateUP)) #convert to integer
levels(factor(HR.df$StateUPWI)) #levels as integer
## [1] "1" "2" "3" "4"
#Keep U.P. Hare observations as "1", set other regions to "0"
MupSet = HR.df
MupSet$OBS2 = ifelse(MupSet$StateUPWI == "2", MupSet$OBS, 0)
#Keep Lower MI Hare observations as "1", set other regions to "0"
MSet = HR.df
MSet$OBS2 = ifelse(MSet$StateUPWI == "3", MSet$OBS, 0)
#Keep Wisconsin Hare observations as "1", set other regions to "0"
WSet = HR.df
WSet$OBS2 = ifelse(WSet$StateUPWI == "4", WSet$OBS, 0)
MupSet$Rep = 1 #Renumbering from 1-3 instead of 2-4
MSet$Rep = 2
WSet$Rep = 3
HR.df = rbind(MupSet, MSet, WSet) #Join data
HR.df %>% #Count of hare observations by region
group_by(Rep) %>%
summarise(Cnt = sum(OBS2))
## # A tibble: 3 x 2
## Rep Cnt
## <dbl> <dbl>
## 1 1 38
## 2 2 38
## 3 3 53
Relating observations to mesh locations through a projection matrix and defining priors for the spatial fields.
#Relate mesh for detection level
locs = cbind(Hare.mod@coords[,1], Hare.mod@coords[,2]) #point locations
A.det = inla.spde.make.A(mesh, #the mesh
alpha = 2, #default setting
loc=locs) #our locations
#Relate mesh for covariate level
locs = cbind(HR.df[,"Long"], HR.df[,"Lat"])
A.env = inla.spde.make.A(mesh,
alpha = 2,
loc=locs)
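#Added check (not from the original script): each row of a projection
#matrix holds the barycentric weights of a point's enclosing triangle,
#so in-domain points should have rows summing to ~1 (0 if outside the mesh).
summary(Matrix::rowSums(A.det))
summary(Matrix::rowSums(A.env))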
#Prior for barrier model
barrier.model = inla.barrier.pcmatern(mesh, #mesh
barrier.triangles = barrier.triangles, #Lake boundaries
prior.range = c(1000, 0.5), #P(spatial range < 1000 km) = 0.5
prior.sigma = c(1, 0.01))
#Same as above, but without barriers
spde = inla.spde2.pcmatern(mesh,
prior.range = c(1000, 0.5),
prior.sigma = c(1, 0.01))
#Create index to track locations of mesh nodes
field.det = inla.spde.make.index("field.det", spde$n.spde) #index for detection level
field.det.c = inla.spde.make.index("field.det.c", spde$n.spde) #copy of above to pass to covariate level
field.env = inla.spde.make.index("field.env", spde$n.spde) #index for covariate level
locs = cbind(HareMesh.mod@coords[,1], HareMesh.mod@coords[,2])
A.base = inla.spde.make.A(mesh,
alpha = 2,
loc=locs)
field.base = inla.spde.make.index("field.base", spde$n.spde)
HR.df2 = HareMesh.mod@data
base.lst = list(c(field.base,
list(intercept3 = 1)),
list(XX = HR.df2[,"Long"])) #Placeholder only
base.stk = inla.stack(data = list(Y = HR.df2$OBS), #Standard model for comparison
A = list(A.base, 1),
effects = base.lst,
tag = "base.0")
INLA stacks require list objects rather than data frames.
#Detection level
DT.df = Hare.mod@data
DT.lst = list(c(field.det, #Spatial index
list(intercept1 = 1)), #Intercept
list(XX = DT.df[,"Lat"], #List of variables/covariates (placeholder for detection)
Site = DT.df[,"Site2"])) #Site identifier (to allow sites to idependently vary)
detect.stk = inla.stack(data = list(Y = DT.df$Counts, #Number of successes
Field.trials = DT.df$Trials), #trials
A = list(A.det, 1), #Projection matrix
effects = DT.lst, #Spatial index and covariates
tag = "det.0") #just a name/tag
jdetect.stk = inla.stack(data = list(Y = cbind(DT.df$Counts, NA), #"NA" = space for next model level
Field.trials = DT.df$Trials), #as above
A = list(A.det, 1),
effects = DT.lst,
tag = "jdet.0")
#Environment/covariate level
HR.lst = list(c(field.env, #index for covariate level
field.det.c, #copy of field from detection level
list(intercept2 = 1)), #intercept
list(mSnw5yr = round(HR.df[,"mSnow5yrE"], 3), #Snow weeks
mMxTemp = round(HR.df[,"mxTempE"], 3), #Temperature
Forest = HR.df[,"Forest1kmE"], #Forest
Region = HR.df[,"Rep"])) #Region identifier
env.stk = inla.stack(data = list(Y = HR.df$OBS, #Standard model for comparison
Field.trials = rep(1, dim(HR.df)[1])), #1 trial
A = list(A.env, 1),
effects = HR.lst,
tag = "env.0")
jenv.stk = inla.stack(data = list(Y = cbind(NA, HR.df$OBS2), #NA = space for detection level
Field.trials = rep(1, dim(HR.df)[1])),
A = list(A.env, 1),
effects = HR.lst,
tag = "jenv.0")
#Combine detection and covariate levels
Joint.stk = inla.stack(jdetect.stk, jenv.stk)
#Save data to run in HPCC
#save(list=c("Joint.stk", "barrier.model", "spde"), file="./HPC/Apr23/Comb_0423.RData") #File for HPCC
Three Regions Model
The joint models are computationally expensive, so they were run on the HPCC.
Site.prior = list(theta=list(prior = "normal", param=c(0, 3))) #Prior for site effect
Reg.prior = list(theta=list(prior = "normal", param=c(0, 3))) #prior for region
JFrm0 = Y ~ -1 + intercept1 + #intercept (detection level)
intercept2 + #Intercept (covariate level)
f(field.det, #spatial index (detection)
model=barrier.model) + #change "barrier.model" to "spde" for no-barrier version.
f(field.det.c, #Shared spatial field to account for correlation between model levels
copy = "field.det", #copied from detection level of model
fixed = FALSE) +
f(field.env, #spatial index for covariate level
model=barrier.model) +
f(Site, #Site identifier
model="iid",
hyper=Site.prior) + #site prior
f(mMxTemp, #temperature
model="rw1", #random walk of order 1
replicate = Region, #replicate for each region
hyper = Reg.prior) + #prior
f(mSnw5yr, #weekly snowfall
model="rw1",
replicate = Region,
hyper = Reg.prior) +
f(Forest, #Forest
model="iid",
replicate = Region,
hyper = Reg.prior)
#thetaJ = JModel.full$internal.summary.hyperpar$mean #from initial run (to speed up model)
thetaJ = c(1.1935975, 3.7979780, -2.1432853, 6.6863289, -0.6710575, -0.1090578, -0.9891725, 0.6587151, 1)
JModel.full = inla(JFrm0, #formula
data = inla.stack.data(Joint.stk), #data list object
family = c("binomial","binomial"), #families for detection and covariate levels
verbose = FALSE, #Show running process
Ntrials = inla.stack.data(Joint.stk)$Field.trials, #number trials (varies for detection level)
control.fixed = list(prec = 0.001, #priors for intercept
prec.intercept = 0.0001),
control.predictor = list(
A = inla.stack.A(Joint.stk), #data again
compute = TRUE, #estimate fitted values
link = 1), #transform fitted values from logit
control.mode = list(restart = TRUE, theta = thetaJ), #to speed up
control.inla = list(strategy="gaussian", #to speed up
int.strategy = "eb"),
control.results = list(return.marginals.random = TRUE, #results to report
return.marginals.predictor = TRUE),
control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE)) #calculate indices for comparison
Load Results from HPCC
load("~/Michigan_State/Sean/HPC/Apr23/Full_042419.RData")
Model Summary
summary(JModel.full)
##
## Call:
## c("inla(formula = JFrm0, family = c(\"binomial\", \"binomial\"), data = inla.stack.data(Joint.stk), ", " Ntrials = inla.stack.data(Joint.stk)$Field.trials, verbose = TRUE, ", " control.compute = list(dic = TRUE, cpo = TRUE, waic = TRUE), ", " control.predictor = list(A = inla.stack.A(Joint.stk), compute = TRUE, ", " link = 1), control.inla = list(strategy = \"gaussian\", ", " int.strategy = \"eb\"), control.results = list(return.marginals.random = TRUE, ", " return.marginals.predictor = TRUE), control.fixed = list(prec = 0.001, ", " prec.intercept = 1e-04), control.mode = list(restart = TRUE, ", " theta = thetaJ), num.threads = 8)")
##
## Time used:
## Pre-processing Running inla Post-processing Total
## 3.2507 17036.6299 3.5429 17043.4235
##
## Fixed effects:
## mean sd 0.025quant 0.5quant 0.975quant mode kld
## intercept1 -4.4311 0.3917 -5.2001 -4.4311 -3.6628 -4.4311 0
## intercept2 -7.8368 0.5061 -8.8304 -7.8369 -6.8441 -7.8368 0
##
## Random effects:
## Name Model
## field.det RGeneric2
## field.env RGeneric2
## Site IID model
## mMxTemp RW1 model
## mSnw5yr RW1 model
## Forest RW1 model
## field.det.c Copy
##
## Model hyperparameters:
## mean sd 0.025quant 0.5quant 0.975quant
## Theta1 for field.det 1.1762 0.1045 0.9682 1.1776 1.3787
## Theta2 for field.det 3.4437 0.1491 3.1587 3.4403 3.7440
## Theta1 for field.env -1.5183 0.7230 -3.1238 -1.4325 -0.3365
## Theta2 for field.env 6.2684 0.6684 5.0977 6.2134 7.7026
## Precision for Site 0.8279 0.2975 0.4102 0.7723 1.5617
## Precision for mMxTemp 0.6399 0.2701 0.2812 0.5840 1.3159
## Precision for mSnw5yr 1.2560 0.4851 0.5576 1.1735 2.4377
## Precision for Forest 1.7432 0.6898 0.7675 1.6203 3.4337
## Beta for field.det.c 0.5375 0.0657 0.4096 0.5371 0.6674
## mode
## Theta1 for field.det 1.1824
## Theta2 for field.det 3.4281
## Theta1 for field.env -1.1039
## Theta2 for field.env 6.0098
## Precision for Site 0.6751
## Precision for mMxTemp 0.4913
## Precision for mSnw5yr 1.0233
## Precision for Forest 1.4011
## Beta for field.det.c 0.5356
##
## Expected number of effective parameters(std dev): 368.64(0.00)
## Number of equivalent replicates : 52.90
##
## Deviance Information Criterion (DIC) ...............: 2355.37
## Deviance Information Criterion (DIC, saturated) ....: NULL
## Effective number of parameters .....................: 605.81
##
## Watanabe-Akaike information criterion (WAIC) ...: 2747.21
## Effective number of parameters .................: 623.77
##
## Marginal log-Likelihood: -31631.59
## CPO and PIT are computed
##
## Posterior marginals for linear predictor and fitted values computed
Treating Wisconsin and the UP as a single/combined region.
HR2.df = HareMesh.mod@data
levels(factor(HR2.df$StateUP)) #Level names
## [1] "All" "Mich.UP" "Michigan" "Wisconsin"
HR2.df$StateUPWI = as.integer(as.factor(HR2.df$StateUP)) #convert to integer
levels(factor(HR2.df$StateUPWI)) #levels as integer
## [1] "1" "2" "3" "4"
#Keep U.P. and WI Hare observations as "1", set Michigan (Lower) locations to "0"
UP_WI_Set = HR2.df
UP_WI_Set$OBS3 = ifelse(UP_WI_Set$StateUPWI == "2" |
UP_WI_Set$StateUPWI == "4", UP_WI_Set$OBS, 0)
#Keep Lower MI Hare observations as "1", set other regions to "0"
MI_Set = HR2.df
MI_Set$OBS3 = ifelse(MI_Set$StateUPWI == "3", MI_Set$OBS, 0)
UP_WI_Set$Rep = 1 #Renumbering to 1-2
MI_Set$Rep = 2
HR2.df = rbind(UP_WI_Set, MI_Set) #Join data
HR2.df %>% #Count of hare observations by region
group_by(Rep) %>%
summarise(Cnt = sum(OBS3))
## # A tibble: 2 x 2
## Rep Cnt
## <dbl> <dbl>
## 1 1 91
## 2 2 38
Two Regions Model Set-up
As above.
#Relate mesh for detection level
locs = cbind(Hare.mod@coords[,1], Hare.mod@coords[,2]) #point locations
A.det = inla.spde.make.A(mesh, #the mesh
alpha = 2, #default setting
loc=locs) #our locations
#Relate mesh for covariate level
locs = cbind(HR2.df[,"Long"], HR2.df[,"Lat"])
A.env = inla.spde.make.A(mesh,
alpha = 2,
loc=locs)
#Prior for barrier model
barrier.model = inla.barrier.pcmatern(mesh, #mesh
barrier.triangles = barrier.triangles, #Lake boundaries
prior.range = c(1000, 0.5), #P(spatial range < 1000 km) = 0.5
prior.sigma = c(1, 0.01))
#Same as above, but without barriers
spde = inla.spde2.pcmatern(mesh,
prior.range = c(1000, 0.5),
prior.sigma = c(1, 0.01))
#Create index to track locations of mesh nodes
field.det = inla.spde.make.index("field.det", spde$n.spde) #index for detection level
field.det.c = inla.spde.make.index("field.det.c", spde$n.spde) #copy of above to pass to covariate level
field.env = inla.spde.make.index("field.env", spde$n.spde) #index for covariate level
Organize Two Regions Data for Fitting
#Detection level
DT.df = Hare.mod@data
DT.lst = list(c(field.det, #Spatial index
list(intercept1 = 1)), #Intercept
list(XX = DT.df[,"Lat"], #List of variables/covariates (placeholder for detection)
Site = DT.df[,"Site2"])) #Site identifier (to allow sites to idependently vary)
jdetect.stk = inla.stack(data = list(Y = cbind(DT.df$Counts, NA), #"NA" = space for next model level
Field.trials = DT.df$Trials), #as above
A = list(A.det, 1),
effects = DT.lst,
tag = "jdet.0")
#Environment/covariate level
HR.lst = list(c(field.env, #index for covariate level
field.det.c, #copy of field from detection level
list(intercept2 = 1)), #intercept
list(mSnw5yr = round(HR2.df[,"mSnow5yrE"], 3), #Snow weeks
mMxTemp = round(HR2.df[,"mxTempE"], 3), #Temperature
Forest = HR2.df[,"Forest1kmE"], #Forest
Region = HR2.df[,"Rep"])) #Region identifier
jenv.stk = inla.stack(data = list(Y = cbind(NA, HR2.df$OBS3), #NA = space for detection level
Field.trials = rep(1, dim(HR2.df)[1])),
A = list(A.env, 1),
effects = HR.lst,
tag = "jenv.0")
#Combine detection and covariate levels
Joint2.stk = inla.stack(jdetect.stk, jenv.stk)
#Save data to run in HPCC
#save(list=c("Joint2.stk", "barrier.model", "spde"), file="./HPC/Apr23/TwoReg.RData") #File for HPCC
Two Regions Model
Site.prior = list(theta=list(prior = "normal", param=c(0, 3))) #Prior for site effect
Reg.prior = list(theta=list(prior = "normal", param=c(0, 3))) #prior for region
JFrm0 = Y ~ -1 + intercept1 + #intercept (detection level)
intercept2 + #Intercept (covariate level)
f(field.det, #spatial index (detection)
model=barrier.model) + #change "barrier.model" to "spde" for no-barrier version.
f(field.det.c, #Shared spatial field to account for correlation between model levels
copy = "field.det", #copied from detection level of model
fixed = FALSE) +
f(field.env, #spatial index for covariate level
model=barrier.model) +
f(Site, #Site identifier
model="iid",
hyper=Site.prior) + #site prior
f(mMxTemp, #temperature
model="rw1", #random walk of order 1
replicate = Region, #replicate for each region
hyper = Reg.prior) + #prior
f(mSnw5yr, #weekly snowfall
model="rw1",
replicate = Region,
hyper = Reg.prior) +
f(Forest, #Forest
model="iid",
replicate = Region,
hyper = Reg.prior)
#thetaJ = JModel.full$internal.summary.hyperpar$mean #from initial run (to speed up model)
thetaJ = c(1.1935975, 3.7979780, -2.1432853, 6.6863289, -0.6710575, -0.1090578, -0.9891725, 0.6587151, 1)
JModel.2Reg = inla(JFrm0, #formula
data = inla.stack.data(Joint2.stk), #data list object
family = c("binomial","binomial"), #families for detection and covariate levels
verbose = FALSE, #Show running process
Ntrials = inla.stack.data(Joint2.stk)$Field.trials, #number trials
control.fixed = list(prec = 0.001, #priors for intercept
prec.intercept = 0.0001),
control.predictor = list(
A = inla.stack.A(Joint2.stk), #data again
compute = TRUE, #estimate fitted values
link = 1), #transform fitted values from logit
control.mode = list(restart = TRUE, theta = thetaJ), #to speed up
control.inla = list(strategy="gaussian", #to speed up
int.strategy = "eb"),
control.results = list(return.marginals.random = TRUE, #results to report
return.marginals.predictor = TRUE),
control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE)) #calculate indices for comparison
Two Regions Model (run on the HPCC)
load("~/Michigan_State/Sean/HPC/Apr23/Reg2_042419.RData")
summary(JModel.2Reg)
##
## Call:
## c("inla(formula = JFrm0, family = c(\"binomial\", \"binomial\"), data = inla.stack.data(Joint2.stk), ", " Ntrials = inla.stack.data(Joint2.stk)$Field.trials, verbose = TRUE, ", " control.compute = list(dic = TRUE, cpo = TRUE, waic = TRUE), ", " control.predictor = list(A = inla.stack.A(Joint2.stk), compute = TRUE, ", " link = 1), control.inla = list(strategy = \"gaussian\", ", " int.strategy = \"eb\"), control.results = list(return.marginals.random = TRUE, ", " return.marginals.predictor = TRUE), control.fixed = list(prec = 0.001, ", " prec.intercept = 1e-04), control.mode = list(restart = TRUE, ", " theta = thetaJ), num.threads = 8)")
##
## Time used:
## Pre-processing Running inla Post-processing Total
## 3.1637 8290.9065 4.0825 8298.1528
##
## Fixed effects:
## mean sd 0.025quant 0.5quant 0.975quant mode kld
## intercept1 -4.3864 0.3892 -5.1505 -4.3864 -3.6229 -4.3864 0
## intercept2 -7.2700 0.5296 -8.3098 -7.2700 -6.2312 -7.2700 0
##
## Random effects:
## Name Model
## field.det RGeneric2
## field.env RGeneric2
## Site IID model
## mMxTemp RW1 model
## mSnw5yr RW1 model
## Forest RW1 model
## field.det.c Copy
##
## Model hyperparameters:
## mean sd 0.025quant 0.5quant 0.975quant
## Theta1 for field.det 1.1729 0.1039 0.9678 1.1732 1.3763
## Theta2 for field.det 3.4372 0.1499 3.1455 3.4360 3.7349
## Theta1 for field.env -1.4900 0.7321 -3.0835 -1.4201 -0.2348
## Theta2 for field.env 6.2731 0.6752 5.0497 6.2320 7.6903
## Precision for Site 0.8376 0.3057 0.4000 0.7838 1.5811
## Precision for mMxTemp 0.7433 0.3109 0.3144 0.6845 1.5125
## Precision for mSnw5yr 1.4042 0.6069 0.5896 1.2819 2.9269
## Precision for Forest 1.6098 0.6864 0.6595 1.4819 3.3080
## Beta for field.det.c 0.5383 0.0663 0.4092 0.5379 0.6692
## mode
## Theta1 for field.det 1.1741
## Theta2 for field.det 3.4317
## Theta1 for field.env -1.1590
## Theta2 for field.env 6.0833
## Precision for Site 0.6884
## Precision for mMxTemp 0.5821
## Precision for mSnw5yr 1.0762
## Precision for Forest 1.2554
## Beta for field.det.c 0.5365
##
## Expected number of effective parameters(std dev): 356.19(0.00)
## Number of equivalent replicates : 36.80
##
## Deviance Information Criterion (DIC) ...............: 2243.46
## Deviance Information Criterion (DIC, saturated) ....: NULL
## Effective number of parameters .....................: 562.02
##
## Watanabe-Akaike information criterion (WAIC) ...: 2633.14
## Effective number of parameters .................: 594.96
##
## Marginal log-Likelihood: -21428.82
## CPO and PIT are computed
##
## Posterior marginals for linear predictor and fitted values computed
Combined Model
Also ran a version of the covariate model without replication by region. Loading results:
load("~/Michigan_State/Sean/HPC/Apr23/Comb_042419.RData")
summary(JModel.comb)
No Covariates, No Trials
For comparison.
JFrmB = Y ~ -1 + intercept3 + #Intercept (covariate level)
f(field.base, #spatial index for covariate level
model=barrier.model)
#thetaJ = JModel.base$internal.summary.hyperpar$mean #from initial run (to speed up model)
thetaJ = c(1.444903, 4.073292)
JModel.base = inla(JFrmB, #formula
data = inla.stack.data(base.stk), #data list object
family = "binomial",#family
verbose = FALSE, #Show running process
#Ntrials = inla.stack.data(Joint.stk)$Field.trials, #default is 1
control.fixed = list(prec = 0.001, #priors for intercept
prec.intercept = 0.0001),
control.predictor = list(
A = inla.stack.A(base.stk), #data again
compute = TRUE, #estimate fitted values
link = 1), #transform fitted values from logit
control.mode = list(restart = TRUE, theta = thetaJ), #to speed up
control.inla = list(strategy="gaussian", #to speed up
int.strategy = "eb"),
control.results = list(return.marginals.random = TRUE, #results to report
return.marginals.predictor = TRUE),
control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE)) #calculate indices for comparison
summary(JModel.base)
##
## Call:
## c("inla(formula = JFrmB, family = \"binomial\", data = inla.stack.data(base.stk), ", " verbose = FALSE, control.compute = list(dic = TRUE, cpo = TRUE, ", " waic = TRUE), control.predictor = list(A = inla.stack.A(base.stk), ", " compute = TRUE, link = 1), control.inla = list(strategy = \"gaussian\", ", " int.strategy = \"eb\"), control.results = list(return.marginals.random = TRUE, ", " return.marginals.predictor = TRUE), control.fixed = list(prec = 0.001, ", " prec.intercept = 1e-04), control.mode = list(restart = TRUE, ", " theta = thetaJ))")
##
## Time used:
## Pre-processing Running inla Post-processing Total
## 1.4933 802.4230 1.2484 805.1647
##
## Fixed effects:
## mean sd 0.025quant 0.5quant 0.975quant mode kld
## intercept3 -6.4402 0.6048 -7.6276 -6.4402 -5.2538 -6.4402 0
##
## Random effects:
## Name Model
## field.base RGeneric2
##
## Model hyperparameters:
## mean sd 0.025quant 0.5quant 0.975quant mode
## Theta1 for field.base 0.5965 0.1966 0.2137 0.5952 0.9857 0.5906
## Theta2 for field.base 5.4381 0.2603 4.9443 5.4307 5.9669 5.4040
##
## Expected number of effective parameters(std dev): 59.26(0.00)
## Number of equivalent replicates : 107.89
##
## Deviance Information Criterion (DIC) ...............: 1182.34
## Deviance Information Criterion (DIC, saturated) ....: 1182.34
## Effective number of parameters .....................: 88.12
##
## Watanabe-Akaike information criterion (WAIC) ...: 1144.80
## Effective number of parameters .................: 47.46
##
## Marginal log-Likelihood: -593.49
## CPO and PIT are computed
##
## Posterior marginals for linear predictor and fitted values computed
Comb.plt.df = as.data.frame(JModel.full$summary.random$mMxTemp)[,1:6] #Get effect estimates (Full model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975") #Clean labels
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 3059) #Add Region ID
Comb.plt.df2 = as.data.frame(JModel.comb$summary.random$mMxTemp)[,1:6] #Get effect estimates (No replicates model)
names(Comb.plt.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df2$Region = "Combined" #ID
Comb.plt.df = rbind(Comb.plt.df, Comb.plt.df2) #Combine for plotting
Plt.df = Comb.plt.df
Plt.df$Region = as.factor(Plt.df$Region)
RAnge = range(Plt.df$ID)
Plt.all = ggplot(Plt.df, aes(ID, Mean, group = Region)) +
geom_smooth(aes(col = Region,
linetype= Region),
method = "loess",
span = 0.3,
se = FALSE,
lwd = 1) +
scale_linetype_manual(values=c("solid", "longdash", "dotted", "dashed")) +
scale_colour_manual(values=c("red", "brown", "blue", "black")) +
geom_hline(yintercept = 0,
linetype = "solid",
col = "lightgray",
size = 0.5) +
#geom_vline(xintercept = 0,
# linetype = "dotted",
# col = "red",
# size = 0.5) +
xlim(RAnge[1], RAnge[2]) +
xlab("Mean Maximum Temperature (°C)") +
ylab("Occurrence Probability (logit)") +
theme_classic() +
theme(axis.text=element_text(size=16),
legend.title = element_text(size=16, face="bold"),
legend.key = element_rect(fill = "gray80", linetype=0),
legend.background = element_rect(fill = "gray80", linetype=0),
strip.text = element_text(face="bold", size = 20),
axis.title.y = element_text(face="bold", size = 20),
axis.title.x = element_text(face="bold", size = 20, vjust=-2))
Plt.all
Comb.plt.df = as.data.frame(JModel.full$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 84)
Comb.plt.df2 = as.data.frame(JModel.comb$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df2$Region = "Combined"
Comb.plt.df = rbind(Comb.plt.df, Comb.plt.df2)
Plt.df = Comb.plt.df
Plt.df$Region = as.factor(Plt.df$Region)
RAnge = range(Plt.df$ID)
Plt.all = ggplot(Plt.df, aes(ID, Mean, group = Region)) +
geom_smooth(aes(col = Region,
linetype= Region),
method = "loess",
span = 0.3,
se = FALSE,
lwd = 1) +
scale_linetype_manual(values=c("solid", "longdash", "dotted", "dashed")) +
scale_colour_manual(values=c("red", "brown", "blue", "black")) +
geom_hline(yintercept = 0,
linetype = "solid",
col = "lightgray",
size = 0.5) +
geom_vline(xintercept = 0,
linetype = "dotted",
col = "red",
size = 0.5) +
xlim(RAnge[1], RAnge[2]) +
xlab("Snow Weeks") +
ylab("Occurrence Probability (logit)") +
theme_classic() +
theme(axis.text=element_text(size=16),
legend.title = element_text(size=16, face="bold"),
legend.key = element_rect(fill = "gray80", linetype=0),
legend.background = element_rect(fill = "gray80", linetype=0),
strip.text = element_text(face="bold", size = 20),
axis.title.y = element_text(face="bold", size = 20),
axis.title.x = element_text(face="bold", size = 20, vjust=-2))
Plt.all
Comb.plt.df = as.data.frame(JModel.full$summary.random$Forest)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 110)
Comb.plt.df2 = as.data.frame(JModel.comb$summary.random$Forest)[,1:6]
names(Comb.plt.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df2$Region = "Combined"
Plt.df = Comb.plt.df
Plt.df$Region = as.factor(Plt.df$Region)
RAnge = range(Plt.df$ID)
Plt.all = ggplot(Plt.df, aes(ID, Mean, group = Region)) +
geom_smooth(aes(col = Region,
linetype= Region),
method = "loess",
span = 0.3,
se = FALSE,
lwd = 1) +
geom_smooth(data=Comb.plt.df2,
aes(ID, Mean, col = Region,
linetype= Region),
method = "loess",
span = 0.6,
se = FALSE,
lwd = 1) +
scale_linetype_manual(values=c("solid", "longdash", "dotted", "dashed")) +
scale_colour_manual(values=c("red", "brown", "blue", "black")) +
geom_hline(yintercept = 0,
linetype = "solid",
col = "lightgray",
size = 0.5) +
geom_vline(xintercept = 0,
linetype = "dotted",
col = "red",
size = 0.5) +
xlim(RAnge[1], RAnge[2]) +
xlab("Forest (1km Grid)") +
ylab("Occurrence Probability (logit)") +
theme_classic() +
theme(axis.text=element_text(size=16),
legend.title = element_text(size=16, face="bold"),
legend.key = element_rect(fill = "gray80", linetype=0),
legend.background = element_rect(fill = "gray80", linetype=0),
strip.text = element_text(face="bold", size = 20),
axis.title.y = element_text(face="bold", size = 20),
axis.title.x = element_text(face="bold", size = 20, vjust=-2))
Plt.all
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$mMxTemp)[,1:6] #Get effect estimates (Full model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975") #Clean labels
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 3059) #Add Region ID
Comb.plt.df2 = as.data.frame(JModel.comb$summary.random$mMxTemp)[,1:6] #Get effect estimates (No replicates model)
names(Comb.plt.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df2$Region = "Combined" #ID
Comb.plt.df = rbind(Comb.plt.df, Comb.plt.df2) #Combine for plotting
Plt.df = Comb.plt.df
Plt.df$Region = as.factor(Plt.df$Region)
RAnge = range(Plt.df$ID)
Plt.all = ggplot(Plt.df, aes(ID, Mean, group = Region)) +
geom_smooth(aes(col = Region,
linetype= Region),
method = "loess",
span = 0.3,
se = FALSE,
lwd = 1) +
scale_linetype_manual(values=c("solid", "longdash", "dotted", "dashed")) +
scale_colour_manual(values=c("red", "brown", "blue", "black")) +
geom_hline(yintercept = 0,
linetype = "solid",
col = "lightgray",
size = 0.5) +
#geom_vline(xintercept = 0,
# linetype = "dotted",
# col = "red",
# size = 0.5) +
xlim(RAnge[1], RAnge[2]) +
xlab("Mean Maximum Temperature (°C)") +
ylab("Occurrence Probability (logit)") +
theme_classic() +
theme(axis.text=element_text(size=16),
legend.title = element_text(size=16, face="bold"),
legend.key = element_rect(fill = "gray80", linetype=0),
legend.background = element_rect(fill = "gray80", linetype=0),
strip.text = element_text(face="bold", size = 20),
axis.title.y = element_text(face="bold", size = 20),
axis.title.x = element_text(face="bold", size = 20, vjust=-2))
Plt.all
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 84)
Comb.plt.df2 = as.data.frame(JModel.comb$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df2$Region = "Combined"
Comb.plt.df = rbind(Comb.plt.df, Comb.plt.df2)
Plt.df = Comb.plt.df
Plt.df$Region = as.factor(Plt.df$Region)
RAnge = range(Plt.df$ID)
Plt.all = ggplot(Plt.df, aes(ID, Mean, group = Region)) +
geom_smooth(aes(col = Region,
linetype= Region),
method = "loess",
span = 0.3,
se = FALSE,
lwd = 1) +
scale_linetype_manual(values=c("solid", "longdash", "dotted", "dashed")) +
scale_colour_manual(values=c("red", "brown", "blue", "black")) +
geom_hline(yintercept = 0,
linetype = "solid",
col = "lightgray",
size = 0.5) +
geom_vline(xintercept = 0,
linetype = "dotted",
col = "red",
size = 0.5) +
xlim(RAnge[1], RAnge[2]) +
xlab("Snow Weeks") +
ylab("Occurrence Probability (logit)") +
theme_classic() +
theme(axis.text=element_text(size=16),
legend.title = element_text(size=16, face="bold"),
legend.key = element_rect(fill = "gray80", linetype=0),
legend.background = element_rect(fill = "gray80", linetype=0),
strip.text = element_text(face="bold", size = 20),
axis.title.y = element_text(face="bold", size = 20),
axis.title.x = element_text(face="bold", size = 20, vjust=-2))
Plt.all
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$Forest)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 110)
Comb.plt.df2 = as.data.frame(JModel.comb$summary.random$Forest)[,1:6]
names(Comb.plt.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df2$Region = "Combined"
Plt.df = Comb.plt.df
Plt.df$Region = as.factor(Plt.df$Region)
RAnge = range(Plt.df$ID)
Plt.all = ggplot(Plt.df, aes(ID, Mean, group = Region)) +
geom_smooth(aes(col = Region,
linetype= Region),
method = "loess",
span = 0.3,
se = FALSE,
lwd = 1) +
geom_smooth(data=Comb.plt.df2,
aes(ID, Mean, col = Region,
linetype= Region),
method = "loess",
span = 0.6,
se = FALSE,
lwd = 1) +
scale_linetype_manual(values=c("solid", "longdash", "dotted", "dashed")) +
scale_colour_manual(values=c("red", "brown", "blue", "black")) +
geom_hline(yintercept = 0,
linetype = "solid",
col = "lightgray",
size = 0.5) +
geom_vline(xintercept = 0,
linetype = "dotted",
col = "red",
size = 0.5) +
xlim(RAnge[1], RAnge[2]) +
xlab("Forest (1km Grid)") +
ylab("Occurrence Probability (logit)") +
theme_classic() +
theme(axis.text=element_text(size=16),
legend.title = element_text(size=16, face="bold"),
legend.key = element_rect(fill = "gray80", linetype=0),
legend.background = element_rect(fill = "gray80", linetype=0),
strip.text = element_text(face="bold", size = 20),
axis.title.y = element_text(face="bold", size = 20),
axis.title.x = element_text(face="bold", size = 20, vjust=-2))
Plt.all
Identifying site-level effects whose 95% credible intervals exclude zero (significant).
PhySig.df2 = as.data.frame(JModel.2Reg$summary.random$Site)
names(PhySig.df2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975", "mode", "kld")
PhySig.df2$PSigL = ifelse(PhySig.df2$Q025>0 & PhySig.df2$Q975>0, 1, 0)
PhySig.df2$PSigH = ifelse(PhySig.df2$Q025<0 & PhySig.df2$Q975<0, 1, 0)
PhySig.df3 = PhySig.df2 %>%
filter(PSigL == 1 | PSigH == 1)
RM.m2 = ggplot(PhySig.df2, aes(x=ID, y=Mean)) +
geom_point(size=2, pch=1, col = "gray75") +
geom_linerange(aes(ymin=Q025, ymax=Q975), colour="gray75") +
geom_point(data=PhySig.df3, aes(x=ID, y=Mean),
size=2, pch=19, col = "red") +
geom_linerange(data=PhySig.df3, aes(ymin=Q025, ymax=Q975), colour="black") +
geom_hline(yintercept = 0,
linetype = "dotted",
colour = "red",
size = 0.75) +
theme_classic() +
xlab("Site Location") +
ylab("Detection Probabilty (logit)") +
theme(axis.title.y = element_text(face="bold", size=14),
axis.title.x = element_text(face="bold", size=14),
axis.text.y = element_text(face="bold", size=12),
axis.ticks.x=element_blank(),
axis.text.x = element_blank())
RM.m2
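A quick tally (an addition, not in the original) of sites whose 95% credible intervals exclude zero:
nrow(PhySig.df3) #sites with intervals excluding zero
nrow(PhySig.df2) #total sites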
Pred.pnts = Grd.pnts #Dense grid created during pre-processing
ModResult = JModel.full #Three Regions Model
ModResult2 = JModel.comb #Combined Model
ModResult3 = JModel.2Reg #Two Regions Model
ModResult4 = JModel.base #Base Model
pLoc = cbind(Pred.pnts@coords[,1], Pred.pnts@coords[,2]) #coords
Ap = inla.spde.make.A(mesh, loc=pLoc) #Relate to mesh
Pred.pnts$Full = drop(Ap %*% ModResult$summary.random$field.env$mean)
Pred.pnts$Reg2 = drop(Ap %*% ModResult3$summary.random$field.env$mean)
Pred.pnts$Comb = drop(Ap %*% ModResult2$summary.random$field.env$mean)
Pred.pnts$BaseEnv = drop(Ap %*% ModResult4$summary.random$field.base$mean) #Get random field by location
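#Added sketch (an assumption, not in the original script): an approximate
#posterior SD for the projected field, ignoring covariance between nodes.
node.sd = ModResult$summary.random$field.env$sd
Pred.pnts$Full.sd = sqrt(drop((Ap^2) %*% (node.sd^2)))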
#Create rasters
Full.rf.r = rasterize(spTransform(
Pred.pnts,
CRS(proj4string(States.r))),
States.r,
"Full",
background = NA)
Reg2.rf.r = rasterize(spTransform(
Pred.pnts,
CRS(proj4string(States.r))),
States.r,
"Reg2",
background = NA)
Comb.rf.r = rasterize(spTransform(
Pred.pnts,
CRS(proj4string(States.r))),
States.r,
"Comb",
background = NA)
Base.rf.r = rasterize(spTransform(
Pred.pnts,
CRS(proj4string(States.r))),
States.r,
"BaseEnv",
background = NA)
RF.stk = stack(Base.rf.r, Comb.rf.r, Reg2.rf.r, Full.rf.r)
names(RF.stk) = c("Base", "Combined", "Reg2", "Reg3")
rng = seq(-1.4, 5.7, 0.1)
mCols = brewer.pal(11, "RdYlGn")
cr = rev(colorRampPalette(mCols)(n = 500))
cr = colorRampPalette(cr,
bias = 2.4, space = "rgb")
levelplot(RF.stk,
layout = c(2,2),
margin = FALSE,
xlab = NULL,
ylab = NULL,
names.attr = c("Base", "Combined", "Two Regions", "Three Regions"),
maxpixels = 1e5,
col.regions = cr, at = rng,
colorkey = list(labels=list(cex=1.5),
space = "bottom"),
par.strip.text = list(fontface='bold', cex=1.5),
par.settings = list(axis.line = list(col = "black"),
strip.background = list(col = 'transparent'),
strip.border = list(col = 'transparent')),
scales = list(cex = 1.25)) +
latticeExtra::layer(sp.polygons(States_map, col = "black", lwd = 0.5)) +
#latticeExtra::layer(sp.polygons(Domain, col = "black", lwd = 0.5)) +
#latticeExtra::layer(sp.polygons(Hare.pnt , col = "black", pch=2, cex = 0.25)) +
#latticeExtra::layer(sp.text(MyMatrix, txt = Domain$Name,
# pos =c(1,3,3,2,2,1,1),
# col="black",font=list(face="bold"), cex=1)) +
latticeExtra::layer({SpatialPolygonsRescale(layout.north.arrow(),
offset = c(-83, 45.5),
scale = 2)})
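If the random-field surfaces are needed outside this session, the stack can be exported; a minimal sketch with raster::writeRaster (the file name is an assumption):
writeRaster(RF.stk, filename = "RF_fields.grd", overwrite = TRUE) #multi-layer native format (hypothetical path)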
Three Region Model Predictions, based on covariates only.
ModResult = JModel.full
#Update Region ID for prediction locations (MI and WI only)
MI_WI_Domain = subset(Domain, Name == "Michigan" | Name == "Wisconsin")
Pred.pnts = Grd.pnts
MI_WI_DomainP = spTransform(MI_WI_Domain, proj4string(Pred.pnts))
Pred.pnts$nDom = over(Pred.pnts, MI_WI_DomainP)[,1]
Pred.pntsN = subset(Pred.pnts, is.na(nDom) == FALSE)
Pred.pntsN$Region = ifelse(Pred.pntsN@data$nDom == 21 & Pred.pntsN@data$UP == "FALSE", "Upper", #FALSE indicates UP
ifelse(Pred.pntsN@data$nDom == 21 & Pred.pntsN@data$UP == "TRUE", "Lower",
ifelse(Pred.pntsN@data$nDom == 48, "Wisconsin", NA)))
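The nested ifelse() above maps polygon IDs to region labels (21 = Michigan, split by the UP flag; 48 = Wisconsin). An equivalent, arguably more readable form with dplyr::case_when (a sketch, not the code run here):
Pred.pntsN$Region = with(Pred.pntsN@data,
                         dplyr::case_when(nDom == 21 & UP == "FALSE" ~ "Upper",
                                          nDom == 21 & UP == "TRUE" ~ "Lower",
                                          nDom == 48 ~ "Wisconsin",
                                          TRUE ~ NA_character_))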
#Get RF
pLoc = cbind(Pred.pntsN@coords[,1], Pred.pntsN@coords[,2])
Ap = inla.spde.make.A(mesh, loc=pLoc)
Pred.pntsN$Full.rf = drop(Ap %*% ModResult$summary.random$field.env$mean)
#Get Temperature
Comb.plt.df = as.data.frame(JModel.full$summary.random$mMxTemp)[,1:6] #Get effect estimates (Full model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975") #Clean labels
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 3059)
#Region-specific estimates
UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")
UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
#Look up UP Temp Estimates
Pred.pntsN$UP.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - UP.lu$ID)))
Pred.pntsN$UP.TmpEst = as.numeric(with(UP.lu,
Mean[match(Pred.pntsN$UP.temp,
UP.POS)]))
#Look up Lower Temp Estimates
Pred.pntsN$LW.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Pred.pntsN$LW.temp,
LW.POS)]))
#Look up WI Temp Estimates
Pred.pntsN$WI.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Pred.pntsN$WI.temp,
WI.POS)]))
#Match to region
Pred.pntsN$Temp.Full = ifelse(Pred.pntsN$Region == "Upper", Pred.pntsN$UP.TmpEst,
ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,
ifelse(Pred.pntsN$Region == "Wisconsin", Pred.pntsN$WI.TmpEst, NA)))
#Get Snow Week (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.full$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 84)
UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")
UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
Pred.pntsN$UP.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - UP.lu$ID)))
Pred.pntsN$UP.TmpEst = as.numeric(with(UP.lu,
Mean[match(Pred.pntsN$UP.temp,
UP.POS)]))
Pred.pntsN$LW.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Pred.pntsN$LW.temp,
LW.POS)]))
Pred.pntsN$WI.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Pred.pntsN$WI.temp,
WI.POS)]))
#Match to region
Pred.pntsN$Snow.Full = ifelse(Pred.pntsN$Region == "Upper", Pred.pntsN$UP.TmpEst,
ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,
ifelse(Pred.pntsN$Region == "Wisconsin", Pred.pntsN$WI.TmpEst, NA)))
#Get Forest (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.full$summary.random$Forest)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 110)
UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")
UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
Pred.pntsN$UP.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - UP.lu$ID)))
Pred.pntsN$UP.TmpEst = as.numeric(with(UP.lu,
Mean[match(Pred.pntsN$UP.temp,
UP.POS)]))
Pred.pntsN$LW.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Pred.pntsN$LW.temp,
LW.POS)]))
Pred.pntsN$WI.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Pred.pntsN$WI.temp,
WI.POS)]))
#Match to region
Pred.pntsN$Forest.Full = ifelse(Pred.pntsN$Region == "Upper", Pred.pntsN$UP.TmpEst,
ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,
ifelse(Pred.pntsN$Region == "Wisconsin", Pred.pntsN$WI.TmpEst, NA)))
Pred.pntsN@data = Pred.pntsN@data %>%
mutate(Pred.Full = plogis(Temp.Full + Snow.Full + Forest.Full))
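plogis() is the inverse-logit: the three marginal effects are summed on the logit scale and mapped back to a probability. For reference:
plogis(0) #0.5 at the logit origin
plogis(2) #~0.88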
Full.pred.r = rasterize(spTransform(
Pred.pntsN,
CRS(proj4string(States.r))),
States.r,
"Pred.Full",
background = NA)
Combined Model Predictions
ModResult = JModel.comb
Pred.pnts = Pred.pntsN
#Get RF
pLoc = cbind(Pred.pntsN@coords[,1], Pred.pntsN@coords[,2])
Ap = inla.spde.make.A(mesh, loc=pLoc)
Pred.pntsN$Comb.rf = drop(Ap %*% ModResult$summary.random$field.env$mean)
#Get Temperature
Comb.plt.df = as.data.frame(JModel.comb$summary.random$mMxTemp)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
#Look up temperature estimates (single pooled curve)
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Pred.pntsN$UP.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Pred.pntsN$cmb.TmpEst = as.numeric(with(Comb.plt.df,
Mean[match(Pred.pntsN$UP.temp,
Comb.POS)]))
#Get Snow
Comb.plt.df = as.data.frame(JModel.comb$summary.random$mSnw5yr)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
#Look up snow estimates
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Pred.pntsN$UP.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Pred.pntsN$Cmb.SnwEst = as.numeric(with(Comb.plt.df,
Mean[match(Pred.pntsN$UP.temp,
Comb.POS)]))
#Get Forest
Comb.plt.df = as.data.frame(JModel.comb$summary.random$Forest)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
#Look up forest estimates
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Pred.pntsN$UP.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Pred.pntsN$Cmb.ForEst = as.numeric(with(Comb.plt.df,
Mean[match(Pred.pntsN$UP.temp,
Comb.POS)]))
Pred.pntsN@data = Pred.pntsN@data %>%
mutate(Pred.Comb = plogis(cmb.TmpEst + Cmb.SnwEst + Cmb.ForEst))
Comb.pred.r = rasterize(spTransform(
Pred.pntsN,
CRS(proj4string(States.r))),
States.r,
"Pred.Comb",
background = NA)
Two Region Model Predictions, based on covariates only.
ModResult = JModel.2Reg
#Update Region ID for prediction locations (MI and WI only)
MI_WI_Domain = subset(Domain, Name == "Michigan" | Name == "Wisconsin")
Pred.pnts = Grd.pnts
MI_WI_DomainP = spTransform(MI_WI_Domain, proj4string(Pred.pnts))
Pred.pnts$nDom = over(Pred.pnts, MI_WI_DomainP)[,1]
Pred.pntsN = subset(Pred.pnts, is.na(nDom) == FALSE)
Pred.pntsN$Region = ifelse(Pred.pntsN@data$nDom == 21 & Pred.pntsN@data$UP == "FALSE", "Upper", #FALSE indicates UP
ifelse(Pred.pntsN@data$nDom == 21 & Pred.pntsN@data$UP == "TRUE", "Lower",
ifelse(Pred.pntsN@data$nDom == 48, "Wisconsin", NA)))
Pred.pntsN$Region = ifelse(Pred.pntsN$Region == "Upper" | Pred.pntsN$Region == "Wisconsin", "Wisconsin + UP", Pred.pntsN$Region)
#Get RF
pLoc = cbind(Pred.pntsN@coords[,1], Pred.pntsN@coords[,2])
Ap = inla.spde.make.A(mesh, loc=pLoc)
Pred.pntsN$Reg2.rf = drop(Ap %*% ModResult$summary.random$field.env$mean)
#Get Temperature
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$mMxTemp)[,1:6] #Get effect estimates (Full model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975") #Clean labels
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 3059)
#Region-specific estimates
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin + Upper")
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
#Look up Lower Temp Estimates
Pred.pntsN$LW.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Pred.pntsN$LW.temp,
LW.POS)]))
#Look up WI + UP Temp Estimates
Pred.pntsN$WI.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Pred.pntsN$WI.temp,
WI.POS)]))
#Match to region
Pred.pntsN$Temp.Full = ifelse(Pred.pntsN$Region == "Wisconsin + UP", Pred.pntsN$WI.TmpEst,
ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst, NA))
#Get Snow Week (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 84)
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin + Upper")
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
Pred.pntsN$LW.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Pred.pntsN$LW.temp,
LW.POS)]))
Pred.pntsN$WI.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Pred.pntsN$WI.temp,
WI.POS)]))
#Match to region
Pred.pntsN$Snow.Full = ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,
ifelse(Pred.pntsN$Region == "Wisconsin + UP", Pred.pntsN$WI.TmpEst, NA))
#Get Forest (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$Forest)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 110)
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin + Upper")
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
Pred.pntsN$LW.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Pred.pntsN$LW.temp,
LW.POS)]))
Pred.pntsN$WI.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Pred.pntsN$WI.temp,
WI.POS)]))
#Match to region
Pred.pntsN$Forest.Full = ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,
ifelse(Pred.pntsN$Region == "Wisconsin + UP", Pred.pntsN$WI.TmpEst, NA))
Pred.pntsN@data = Pred.pntsN@data %>%
mutate(Pred.Full = plogis(Temp.Full + Snow.Full + Forest.Full))
Reg2.pred.r = rasterize(spTransform(
Pred.pntsN,
CRS(proj4string(States.r))),
States.r,
"Pred.Full",
background = NA)
Pred.stk = stack(Comb.pred.r, Reg2.pred.r, Full.pred.r)
names(Pred.stk) = c("Combined", "Reg2", "Reg3")
rng = seq(0, 1, 0.01)
mCols = brewer.pal(9, "YlOrBr")
cr = colorRampPalette(mCols)(n = 500)
cr = colorRampPalette(cr,
bias = 1, space = "rgb")
levelplot(Pred.stk, #Trial.comp,
layout = c(3,1),
margin = FALSE,
xlab = NULL,
ylab = NULL,
#main = "Occurrence Probability",
names.attr= c("Combined", "Two Region", "Three Region"),
maxpixels = 1e5,
col.regions = cr, at = rng,
colorkey = list(labels=list(cex=1.5),
space = "bottom"),
par.strip.text = list(fontface='bold', cex=1.5),
par.settings = list(axis.line = list(col = "black"),
strip.background = list(col = 'transparent'),
strip.border = list(col = 'transparent')),
scales = list(cex = 1.25)) +
latticeExtra::layer(sp.polygons(States_map, col = "black", lwd = 1)) +
#latticeExtra::layer(sp.polygons(Hare.pnt , col = "black", pch=2, cex = 0.25)) +
latticeExtra::layer({SpatialPolygonsRescale(layout.north.arrow(),
offset = c(-83, 45.5),
scale = 2)})
Prediction for Sample Locations
#Obs.set = subset(HareMesh.mod, OBS == 1) #superseded; predictions are evaluated at the validation set
Obs.set = Validation.set2
#Obs.set = subset(Obs.set, OBS == 1)
#Update Region ID for prediction locations (MI and WI only)
MI_WI_Domain = subset(Domain, Name == "Michigan" | Name == "Wisconsin")
Obs.set$nDom = over(Obs.set, MI_WI_DomainP)[,1]
Obs.set = subset(Obs.set, is.na(nDom) == FALSE)
Obs.set$Region = ifelse(Obs.set@data$nDom == 21 & Obs.set@data$UP == "FALSE", "Upper", #FALSE indicates UP
ifelse(Obs.set@data$nDom == 21 & Obs.set@data$UP == "TRUE", "Lower",
ifelse(Obs.set@data$nDom == 48, "Wisconsin", NA)))
Obs.set = Obs.set@data
dim(Obs.set)
## [1] 5680 20
sum(Obs.set$OBS)
## [1] 1026
Obs.set %>%
group_by(Region) %>%
summarise(Count = length(Region))
## # A tibble: 3 x 2
## Region Count
## <chr> <int>
## 1 Lower 1244
## 2 Upper 1820
## 3 Wisconsin 2616
#Get Temperature
Comb.plt.df = as.data.frame(JModel.full$summary.random$mMxTemp)[,1:6] #Get effect estimates (Full model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975") #Clean labels
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 3059)
#Region-specific estimates
UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")
UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
#Look up UP Temp Estimates
Obs.set$UP.temp = sapply(Obs.set$mxTempE, function(x)which.min(abs(x - UP.lu$ID)))
Obs.set$UP.TmpEst = as.numeric(with(UP.lu,
Mean[match(Obs.set$UP.temp,
UP.POS)]))
#Look up Lower Temp Estimates
Obs.set$LW.temp = sapply(Obs.set$mxTempE, function(x)which.min(abs(x - LW.lu$ID)))
Obs.set$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Obs.set$LW.temp,
LW.POS)]))
#Look up WI Temp Estimates
Obs.set$WI.temp = sapply(Obs.set$mxTempE, function(x)which.min(abs(x - WI.lu$ID)))
Obs.set$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Obs.set$WI.temp,
WI.POS)]))
#Match to region
Obs.set$Temp.Full = ifelse(Obs.set$Region == "Upper", Obs.set$UP.TmpEst,
ifelse(Obs.set$Region == "Lower", Obs.set$LW.TmpEst,
ifelse(Obs.set$Region == "Wisconsin", Obs.set$WI.TmpEst, NA)))
#Get Snow Week (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.full$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 84)
UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")
UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
Obs.set$UP.temp = sapply(Obs.set$mSnow5yrE, function(x)which.min(abs(x - UP.lu$ID)))
Obs.set$UP.TmpEst = as.numeric(with(UP.lu,
Mean[match(Obs.set$UP.temp,
UP.POS)]))
Obs.set$LW.temp = sapply(Obs.set$mSnow5yrE, function(x)which.min(abs(x - LW.lu$ID)))
Obs.set$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Obs.set$LW.temp,
LW.POS)]))
Obs.set$WI.temp = sapply(Obs.set$mSnow5yrE, function(x)which.min(abs(x - WI.lu$ID)))
Obs.set$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Obs.set$WI.temp,
WI.POS)]))
#Match to region
Obs.set$Snow.Full = ifelse(Obs.set$Region == "Upper", Obs.set$UP.TmpEst,
ifelse(Obs.set$Region == "Lower", Obs.set$LW.TmpEst,
ifelse(Obs.set$Region == "Wisconsin", Obs.set$WI.TmpEst, NA)))
#Get Forest (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.full$summary.random$Forest)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 110)
UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")
UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
Obs.set$UP.temp = sapply(Obs.set$Forest1kmE, function(x)which.min(abs(x - UP.lu$ID)))
Obs.set$UP.TmpEst = as.numeric(with(UP.lu,
Mean[match(Obs.set$UP.temp,
UP.POS)]))
Obs.set$LW.temp = sapply(Obs.set$Forest1kmE, function(x)which.min(abs(x - LW.lu$ID)))
Obs.set$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Obs.set$LW.temp,
LW.POS)]))
Obs.set$WI.temp = sapply(Obs.set$Forest1kmE, function(x)which.min(abs(x - WI.lu$ID)))
Obs.set$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Obs.set$WI.temp,
WI.POS)]))
#Match to region
Obs.set$Forest.Full = ifelse(Obs.set$Region == "Upper", Obs.set$UP.TmpEst,
ifelse(Obs.set$Region == "Lower", Obs.set$LW.TmpEst,
ifelse(Obs.set$Region == "Wisconsin", Obs.set$WI.TmpEst, NA)))
Obs.set = Obs.set %>%
mutate(Pred.Full = plogis(Temp.Full + Snow.Full + Forest.Full))
Obs.set.R3.keep = Obs.set
#Obs.set$Prop = Obs.set$Counts/Obs.set$Trials
#Full.BS = mean((Obs.set$Pred.Full - Obs.set$Prop)^2)
#Full.BS
Full.PA = as.data.frame(cbind(1:dim(Obs.set)[1], Obs.set$OBS, Obs.set$Pred.Full))
names(Full.PA) = c("ID", "OBS", "Pred")
Thresh = optimal.thresholds(Full.PA, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
## Method Pred
## 1 MaxSens+Spec 0.91
## 2 PredPrev=Obs 0.97
Full.PA.out = presence.absence.accuracy(Full.PA, threshold = Thresh[1,2])
#Calculate TSS
Full.PA.out$TSS = Full.PA.out$sensitivity + Full.PA.out$specificity - 1
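For reference, the headline metrics reduce to confusion-matrix counts at the chosen cutoff; a hand-rolled sketch (PresenceAbsence computes these internally; the >= vs > boundary handling may differ slightly from the package convention):
cut = Thresh[1,2]
pred.bin = as.integer(Full.PA$Pred >= cut)
sens = sum(pred.bin == 1 & Full.PA$OBS == 1)/sum(Full.PA$OBS == 1) #true positive rate
spec = sum(pred.bin == 0 & Full.PA$OBS == 0)/sum(Full.PA$OBS == 0) #true negative rate
sens + spec - 1 #TSS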
#Combined Model
#Get Temperature
Comb.plt.df = as.data.frame(JModel.comb$summary.random$mMxTemp)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
#Look up temperature estimates (single pooled curve)
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Obs.set$UP.temp = sapply(Obs.set$mxTempE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Obs.set$cmb.TmpEst = as.numeric(with(Comb.plt.df,
Mean[match(Obs.set$UP.temp,
Comb.POS)]))
#Get Snow
Comb.plt.df = as.data.frame(JModel.comb$summary.random$mSnw5yr)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
#Look up snow estimates
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Obs.set$UP.temp = sapply(Obs.set$mSnow5yrE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Obs.set$Cmb.SnwEst = as.numeric(with(Comb.plt.df,
Mean[match(Obs.set$UP.temp,
Comb.POS)]))
#Get Forest
Comb.plt.df = as.data.frame(JModel.comb$summary.random$Forest)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
#Look up forest estimates
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Obs.set$UP.temp = sapply(Obs.set$Forest1kmE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Obs.set$Cmb.ForEst = as.numeric(with(Comb.plt.df,
Mean[match(Obs.set$UP.temp,
Comb.POS)]))
Obs.set = Obs.set %>%
mutate(Pred.Comb = plogis(cmb.TmpEst + Cmb.SnwEst + Cmb.ForEst))
Obs.set.comb.keep = Obs.set
#Comb.BS = mean((Obs.set$Pred.Comb - Obs.set$Prop)^2, na.rm=T)
#Comb.BS
Comb.PA = as.data.frame(cbind((1:dim(Obs.set)[1]), Obs.set$OBS, Obs.set$Pred.Comb))
names(Comb.PA) = c("ID", "OBS", "Pred")
Thresh2 = optimal.thresholds(Comb.PA, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh2
## Method Pred
## 1 MaxSens+Spec 0.82
## 2 PredPrev=Obs 0.90
Comb.PA.out = presence.absence.accuracy(Comb.PA, threshold = Thresh2[1,2])
#Calculate TSS
Comb.PA.out$TSS = Comb.PA.out$sensitivity + Comb.PA.out$specificity - 1
######################################
##Two Region Model
ModResult = JModel.2Reg
Obs.set = Validation.set2
#Update Region ID for prediction locations (MI and WI only)
MI_WI_Domain = subset(Domain, Name == "Michigan" | Name == "Wisconsin")
MI_WI_DomainP = spTransform(MI_WI_Domain, proj4string(Obs.set))
Obs.set$nDom = over(Obs.set, MI_WI_DomainP)[,1]
Obs.setN = subset(Obs.set, is.na(nDom) == FALSE)
Obs.setN$Region = ifelse(Obs.setN@data$nDom == 21 & Obs.setN@data$UP == "FALSE", "Upper", #FALSE indicates UP
ifelse(Obs.setN@data$nDom == 21 & Obs.setN@data$UP == "TRUE", "Lower",
ifelse(Obs.setN@data$nDom == 48, "Wisconsin", NA)))
Obs.setN$Region = ifelse(Obs.setN$Region == "Upper" | Obs.setN$Region == "Wisconsin", "Wisconsin + UP", Obs.setN$Region)
#Get RF
pLoc = cbind(Obs.setN@coords[,1], Obs.setN@coords[,2])
Ap = inla.spde.make.A(mesh, loc=pLoc)
Obs.setN$Reg2.rf = drop(Ap %*% ModResult$summary.random$field.env$mean)
#Get Temperature
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$mMxTemp)[,1:6] #Get effect estimates (Full model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975") #Clean labels
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 3059)
#Region-specific estimates
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin + Upper")
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
#Look up Lower Temp Estimates
Obs.setN$LW.temp = sapply(Obs.setN$mxTempE, function(x)which.min(abs(x - LW.lu$ID)))
Obs.setN$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Obs.setN$LW.temp,
LW.POS)]))
#Look up WI + UP Temp Estimates
Obs.setN$WI.temp = sapply(Obs.setN$mxTempE, function(x)which.min(abs(x - WI.lu$ID)))
Obs.setN$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Obs.setN$WI.temp,
WI.POS)]))
#Match to region
Obs.setN$Temp.Full = ifelse(Obs.setN$Region == "Wisconsin + UP", Obs.setN$WI.TmpEst,
ifelse(Obs.setN$Region == "Lower", Obs.setN$LW.TmpEst, NA))
#Get Snow Week (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 84)
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin + Upper")
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
Obs.setN$LW.temp = sapply(Obs.setN$mSnow5yrE, function(x)which.min(abs(x - LW.lu$ID)))
Obs.setN$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Obs.setN$LW.temp,
LW.POS)]))
Obs.setN$WI.temp = sapply(Obs.setN$mSnow5yrE, function(x)which.min(abs(x - WI.lu$ID)))
Obs.setN$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Obs.setN$WI.temp,
WI.POS)]))
#Match to region
Obs.setN$Snow.Full = ifelse(Obs.setN$Region == "Lower", Obs.setN$LW.TmpEst,
ifelse(Obs.setN$Region == "Wisconsin + UP", Obs.setN$WI.TmpEst, NA))
#Get Forest (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$Forest)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 110)
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin + Upper")
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
Obs.setN$LW.temp = sapply(Obs.setN$Forest1kmE, function(x)which.min(abs(x - LW.lu$ID)))
Obs.setN$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Obs.setN$LW.temp,
LW.POS)]))
Obs.setN$WI.temp = sapply(Obs.setN$Forest1kmE, function(x)which.min(abs(x - WI.lu$ID)))
Obs.setN$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Obs.setN$WI.temp,
WI.POS)]))
#Match to region
Obs.setN$Forest.Full = ifelse(Obs.setN$Region == "Lower", Obs.setN$LW.TmpEst,
ifelse(Obs.setN$Region == "Wisconsin + UP", Obs.setN$WI.TmpEst, NA))
Obs.setN@data = Obs.setN@data %>%
mutate(Pred.Full = plogis(Temp.Full + Snow.Full + Forest.Full))
Obs.set.R2.keep = Obs.setN
Reg2.PA = as.data.frame(cbind(1:dim(Obs.setN)[1], Obs.setN$OBS, Obs.setN$Pred.Full))
names(Reg2.PA) = c("ID", "OBS", "Pred")
Thresh = optimal.thresholds(Reg2.PA, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
## Method Pred
## 1 MaxSens+Spec 0.90
## 2 PredPrev=Obs 0.95
Reg2.PA.out = presence.absence.accuracy(Reg2.PA, threshold = Thresh[1,2])
#Calculate TSS
Reg2.PA.out$TSS = Reg2.PA.out$sensitivity + Reg2.PA.out$specificity - 1
Comb.PA.out
## model threshold PCC sensitivity specificity Kappa AUC
## 1 Pred 0.82 0.5971831 0.9805068 0.5126773 0.2660103 0.6823239
## PCC.sd sensitivity.sd specificity.sd Kappa.sd AUC.sd
## 1 0.006508361 0.004318219 0.007327635 0.008241037 0.00664742
## TSS
## 1 0.4931841
Reg2.PA.out
## model threshold PCC sensitivity specificity Kappa AUC
## 1 Pred 0.9 0.6290493 0.9892788 0.5496347 0.3007194 0.7256016
## PCC.sd sensitivity.sd specificity.sd Kappa.sd AUC.sd
## 1 0.006410096 0.00321677 0.007293786 0.008730896 0.006313229
## TSS
## 1 0.5389135
Full.PA.out
## model threshold PCC sensitivity specificity Kappa AUC
## 1 Pred 0.91 0.6660211 0.9152047 0.6110872 0.3180859 0.7545054
## PCC.sd sensitivity.sd specificity.sd Kappa.sd AUC.sd
## 1 0.006258461 0.00870128 0.007146793 0.01001113 0.006570182
## TSS
## 1 0.5262919
Model Compare Tables
#Environment Level
Compare.tab = as.data.frame(matrix(ncol = 7, nrow= 3))
names(Compare.tab) = c("Model", "DIC", "WAIC", "Sensitivity", "Specificity", "AUC", "TSS")
Compare.tab[1, 1] = "Combined"
Compare.tab[1, 2] = round(GetMets(JModel.comb)[1,3],2)
Compare.tab[1, 3] = round(GetMets(JModel.comb)[2,3],2)
Compare.tab[1, 4] = round(Comb.PA.out[1,4], 3)
Compare.tab[1, 5] = round(Comb.PA.out[1,5], 3)
Compare.tab[1, 6] = round(Comb.PA.out[1,7], 3)
Compare.tab[1, 7] = round(Comb.PA.out[1,13], 3)
Compare.tab[2, 1] = "Two Region"
Compare.tab[2, 2] = round(GetMets(JModel.2Reg)[1,3],2)
Compare.tab[2, 3] = round(GetMets(JModel.2Reg)[2,3],2)
Compare.tab[2, 4] = round(Reg2.PA.out[1,4], 3)
Compare.tab[2, 5] = round(Reg2.PA.out[1,5], 3)
Compare.tab[2, 6] = round(Reg2.PA.out[1,7], 3)
Compare.tab[2, 7] = round(Reg2.PA.out[1,13], 3)
Compare.tab[3, 1] = "Three Region"
Compare.tab[3, 2] = round(GetMets(JModel.full)[1,3],2)
Compare.tab[3, 3] = round(GetMets(JModel.full)[2,3],2)
Compare.tab[3, 4] = round(Full.PA.out[1,4], 3)
Compare.tab[3, 5] = round(Full.PA.out[1,5], 3)
Compare.tab[3, 6] = round(Full.PA.out[1,7], 3)
Compare.tab[3, 7] = round(Full.PA.out[1,13], 3)
#Detection Level
Compare.tab1 = as.data.frame(matrix(ncol = 3, nrow= 3))
names(Compare.tab1) = c("Model", "DIC", "WAIC")
Compare.tab1[1, 1] = "Combined"
Compare.tab1[1, 2] = round(GetMets(JModel.comb)[1,2],2)
Compare.tab1[1, 3] = round(GetMets(JModel.comb)[2,2],2)
Compare.tab1[2, 1] = "Two Region"
Compare.tab1[2, 2] = round(GetMets(JModel.2Reg)[1,2],2)
Compare.tab1[2, 3] = round(GetMets(JModel.2Reg)[2,2],2)
Compare.tab1[3, 1] = "Three Region"
Compare.tab1[3, 2] = round(GetMets(JModel.full)[1,2],2)
Compare.tab1[3, 3] = round(GetMets(JModel.full)[2,2],2)
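The tables above are filled cell by cell; an equivalent hypothetical helper (a sketch using the same GetMets indexing as above) builds each environment-level row in one call:
mk.row = function(name, mod, pa) {
  data.frame(Model = name,
             DIC = round(GetMets(mod)[1,3], 2),
             WAIC = round(GetMets(mod)[2,3], 2),
             Sensitivity = round(pa[1,4], 3),
             Specificity = round(pa[1,5], 3),
             AUC = round(pa[1,7], 3),
             TSS = round(pa[1,13], 3))
}
#Compare.tab = rbind(mk.row("Combined", JModel.comb, Comb.PA.out),
#                    mk.row("Two Region", JModel.2Reg, Reg2.PA.out),
#                    mk.row("Three Region", JModel.full, Full.PA.out))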
kable(Compare.tab, caption = "Environment Level") %>%
kable_styling("striped", full_width = F) %>%
row_spec(0, font_size = 20) %>%
column_spec(1, bold = T)
Model | DIC | WAIC | Sensitivity | Specificity | AUC | TSS |
---|---|---|---|---|---|---|
Combined | 1509.71 | 1334.57 | 0.981 | 0.513 | 0.682 | 0.493 |
Two Region | 1382.65 | 1201.68 | 0.989 | 0.550 | 0.726 | 0.539 |
Three Region | 1491.94 | 1283.79 | 0.915 | 0.611 | 0.755 | 0.526 |
kable(Compare.tab1, caption = "Detection Level") %>%
kable_styling("striped", full_width = F) %>%
row_spec(0, font_size = 20) %>%
column_spec(1, bold = T)
Model | DIC | WAIC |
---|---|---|
Combined | 861.48 | 1437.08 |
Two Region | 860.80 | 1431.46 |
Three Region | 863.43 | 1463.42 |
#Two Regions
#"Wisconsin + UP"
Reg2.WI_UP = Obs.set.R2.keep@data %>% filter(Region == "Wisconsin + UP")
Reg2.WI_UP = as.data.frame(cbind(1:dim(Reg2.WI_UP)[1], Reg2.WI_UP$OBS, Reg2.WI_UP$Pred.Full))
names(Reg2.WI_UP) = c("ID", "OBS", "Pred")
Thresh = optimal.thresholds(Reg2.WI_UP, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
## Method Pred
## 1 MaxSens+Spec 0.90
## 2 PredPrev=Obs 0.95
Reg2.WI_UP.out = presence.absence.accuracy(Reg2.WI_UP, threshold = Thresh[1,2])
#Calculate TSS
Reg2.WI_UP.out$TSS = Reg2.WI_UP.out$sensitivity + Reg2.WI_UP.out$specificity - 1
Reg2.WI_UP.out$Region = "Wisconsin + UP"
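The per-region blocks below repeat the same four steps (build an ID/OBS/Pred frame, pick a threshold, score, add TSS); a hypothetical wrapper (a sketch, not the code run here):
region.metrics = function(df, pred.col, region.lab) {
  PA = data.frame(ID = 1:nrow(df), OBS = df$OBS, Pred = df[[pred.col]])
  th = optimal.thresholds(PA, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
  out = presence.absence.accuracy(PA, threshold = th[1,2])
  out$TSS = out$sensitivity + out$specificity - 1
  out$Region = region.lab
  out
}
#e.g., Reg2.WI_Low.out = region.metrics(
#  Obs.set.R2.keep@data %>% filter(Region == "Lower"), "Pred.Full", "Lower")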
#"Lower"
Reg2.WI_Low = Obs.set.R2.keep@data %>% filter(Region == "Lower")
Reg2.WI_Low = as.data.frame(cbind(1:dim(Reg2.WI_Low)[1], Reg2.WI_Low$OBS, Reg2.WI_Low$Pred.Full))
names(Reg2.WI_Low) = c("ID", "OBS", "Pred")
Thresh = optimal.thresholds(Reg2.WI_Low, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
## Method Pred
## 1 MaxSens+Spec 0.92
## 2 PredPrev=Obs 0.98
Reg2.WI_Low.out = presence.absence.accuracy(Reg2.WI_Low, threshold = Thresh[1,2])
#Calculate TSS
Reg2.WI_Low.out$TSS = Reg2.WI_Low.out$sensitivity + Reg2.WI_Low.out$specificity - 1
Reg2.WI_Low.out$Region = "Lower"
#Three Regions Model
##"Wisconsin"
Reg3.WI = Obs.set.R3.keep %>% filter(Region == "Wisconsin")
Reg3.WI = as.data.frame(cbind(1:dim(Reg3.WI)[1], Reg3.WI$OBS, Reg3.WI$Pred.Full))
names(Reg3.WI) = c("ID", "OBS", "Pred")
Thresh = optimal.thresholds(Reg3.WI, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
## Method Pred
## 1 MaxSens+Spec 0.93
## 2 PredPrev=Obs 0.99
Reg3.WI.out = presence.absence.accuracy(Reg3.WI, threshold = Thresh[1,2])
#Calculate TSS
Reg3.WI.out$TSS = Reg3.WI.out$sensitivity + Reg3.WI.out$specificity - 1
Reg3.WI.out$Region = "Wisconsin"
##"Upper"
Reg3.UP = Obs.set.R3.keep %>% filter(Region == "Upper")
Reg3.UP = as.data.frame(cbind(1:dim(Reg3.UP)[1], Reg3.UP$OBS, Reg3.UP$Pred.Full))
names(Reg3.UP) = c("ID", "OBS", "Pred")
Thresh = optimal.thresholds(Reg3.UP, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
## Method Pred
## 1 MaxSens+Spec 0.94
## 2 PredPrev=Obs 0.96
Reg3.UP.out = presence.absence.accuracy(Reg3.UP, threshold = Thresh[1,2])
#Calculate TSS
Reg3.UP.out$TSS = Reg3.UP.out$sensitivity + Reg3.UP.out$specificity - 1
Reg3.UP.out$Region = "Upper"
##"Lower"
Reg3.low = Obs.set.R3.keep %>% filter(Region == "Lower")
Reg3.low = as.data.frame(cbind(1:dim(Reg3.low)[1], Reg3.low$OBS, Reg3.low$Pred.Full))
names(Reg3.low) = c("ID", "OBS", "Pred")
Thresh = optimal.thresholds(Reg3.low, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
## Method Pred
## 1 MaxSens+Spec 0.95
## 2 PredPrev=Obs 0.99
Reg3.low.out = presence.absence.accuracy(Reg3.low, threshold = Thresh[1,2])
#Calculate TSS
Reg3.low.out$TSS = Reg3.low.out$sensitivity + Reg3.low.out$specificity - 1
Reg3.low.out$Region = "Lower"
#Combined Model ################
#Obs.set.comb.keep
##"Wisconsin"
CombWI = Obs.set.comb.keep %>% filter(StateUP == "Wisconsin")
CombWI = as.data.frame(cbind(1:dim(CombWI)[1], CombWI$OBS, CombWI$Pred.Comb))
names(CombWI) = c("ID", "OBS", "Pred")
Thresh = optimal.thresholds(CombWI, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
## Method Pred
## 1 MaxSens+Spec 0.86
## 2 PredPrev=Obs 0.81
CombWI.out = presence.absence.accuracy(CombWI, threshold = Thresh[1,2])
#Calculate TSS
CombWI.out$TSS = CombWI.out$sensitivity + CombWI.out$specificity - 1
CombWI.out$Region = "Wisconsin"
##"Upper"
CombUP = Obs.set.comb.keep %>% filter(StateUP == "Mich.UP")
CombUP = as.data.frame(cbind(1:dim(CombUP)[1], CombUP$OBS, CombUP$Pred.Comb))
names(CombUP) = c("ID", "OBS", "Pred")
Thresh = optimal.thresholds(CombUP, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
## Method Pred
## 1 MaxSens+Spec 0.83
## 2 PredPrev=Obs 0.85
CombUP.out = presence.absence.accuracy(CombUP, threshold = Thresh[1,2])
#Calculate TSS
CombUP.out$TSS = CombUP.out$sensitivity + CombUP.out$specificity - 1
CombUP.out$Region = "Upper"
##"Lower"
Comblow = Obs.set.comb.keep %>% filter(StateUP == "Michigan")
Comblow = as.data.frame(cbind(1:dim(Comblow)[1], Comblow$OBS, Comblow$Pred.Comb))
names(Comblow) = c("ID", "OBS", "Pred")
Thresh = optimal.thresholds(Comblow, opt.methods = c("MaxSens+Spec", "PredPrev=Obs"))
Thresh
## Method Pred
## 1 MaxSens+Spec 0.93
## 2 PredPrev=Obs 0.92
Comblow.out = presence.absence.accuracy(Comblow, threshold = Thresh[1,2])
#Calculate TSS
Comblow.out$TSS = Comblow.out$sensitivity + Comblow.out$specificity - 1
Comblow.out$Region = "Lower"
#Create Table
Reg2.WI_UP.out$Model = "Two Region"
Reg2.WI_Low.out$Model = "Two Region"
Reg3.WI.out$Model = "Three Region"
Reg3.UP.out$Model = "Three Region"
Reg3.low.out$Model = "Three Region"
CombWI.out$Model = "Combined"
CombUP.out$Model = "Combined"
Comblow.out$Model = "Combined"
All.data = rbind(CombWI.out, CombUP.out, Comblow.out, Reg2.WI_Low.out, Reg2.WI_UP.out,
Reg3.WI.out, Reg3.UP.out, Reg3.low.out)
All.data = All.data %>% select(Model, Region, sensitivity, specificity, AUC, TSS)
names(All.data) = c("Model", "Region", "Sensitivity", "Specificity", "AUC", "TSS")
kable(All.data, caption = "Metrics by Region") %>%
kable_styling("striped", full_width = F) %>%
row_spec(0, font_size = 20) %>%
column_spec(1, bold = T)
Model | Region | Sensitivity | Specificity | AUC | TSS |
---|---|---|---|---|---|
Combined | Wisconsin | 0.6136364 | 0.5714286 | 0.5649351 | 0.1850649 |
Combined | Upper | 0.9424084 | 0.3367935 | 0.6586925 | 0.2792019 |
Combined | Lower | 0.8518519 | 0.5000000 | 0.7129630 | 0.3518519 |
Two Region | Lower | 0.9259259 | 0.7477403 | 0.8029459 | 0.6736663 |
Two Region | Wisconsin + UP | 0.9909910 | 0.4951993 | 0.7021378 | 0.4861903 |
Three Region | Wisconsin | 0.4318182 | 0.7655521 | 0.5654249 | 0.1973703 |
Three Region | Upper | 0.8125654 | 0.3757225 | 0.5701522 | 0.1882880 |
Three Region | Lower | 0.9259259 | 0.7444536 | 0.8018199 | 0.6703795 |
#Prediction
ModResult = JModel.full
#Update Region ID for prediction locations (MI and WI only)
MI_WI_Domain = subset(Domain, Name == "Michigan" | Name == "Wisconsin")
Pred.pnts = zGrd.pnts #Sample-location grid
MI_WI_DomainP = spTransform(MI_WI_Domain, proj4string(Pred.pnts))
Pred.pnts$nDom = over(Pred.pnts, MI_WI_DomainP)[,1]
Pred.pntsN = subset(Pred.pnts, is.na(nDom) == FALSE)
Pred.pntsN$Region = ifelse(Pred.pntsN@data$nDom == 21 & Pred.pntsN@data$UP == "FALSE", "Upper", #FALSE indicates UP
ifelse(Pred.pntsN@data$nDom == 21 & Pred.pntsN@data$UP == "TRUE", "Lower",
ifelse(Pred.pntsN@data$nDom == 48, "Wisconsin", NA)))
#Get RF
pLoc = cbind(Pred.pntsN@coords[,1], Pred.pntsN@coords[,2])
Ap = inla.spde.make.A(mesh, loc=pLoc)
Pred.pntsN$Full.rf = drop(Ap %*% ModResult$summary.random$field.env$mean)
#Get Temperature
Comb.plt.df = as.data.frame(JModel.full$summary.random$mMxTemp)[,1:6] #Get effect estimates (Full model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975") #Clean labels
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 3059)
#Region-specific estimates
UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")
UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
#Look up UP Temp Estimates
Pred.pntsN$UP.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - UP.lu$ID)))
Pred.pntsN$UP.TmpEst = as.numeric(with(UP.lu,
Mean[match(Pred.pntsN$UP.temp,
UP.POS)]))
#Look up Lower Temp Estimates
Pred.pntsN$LW.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Pred.pntsN$LW.temp,
LW.POS)]))
#Look up WI Temp Estimates
Pred.pntsN$WI.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Pred.pntsN$WI.temp,
WI.POS)]))
#Match to region
Pred.pntsN$Temp.Full = ifelse(Pred.pntsN$Region == "Upper", Pred.pntsN$UP.TmpEst,
ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,
ifelse(Pred.pntsN$Region == "Wisconsin", Pred.pntsN$WI.TmpEst, NA)))
#Get Snow Week (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.full$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 84)
UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")
UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
Pred.pntsN$UP.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - UP.lu$ID)))
Pred.pntsN$UP.TmpEst = as.numeric(with(UP.lu,
Mean[match(Pred.pntsN$UP.temp,
UP.POS)]))
Pred.pntsN$LW.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Pred.pntsN$LW.temp,
LW.POS)]))
Pred.pntsN$WI.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Pred.pntsN$WI.temp,
WI.POS)]))
#Match to region
Pred.pntsN$Snow.Full = ifelse(Pred.pntsN$Region == "Upper", Pred.pntsN$UP.TmpEst,
ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,
ifelse(Pred.pntsN$Region == "Wisconsin", Pred.pntsN$WI.TmpEst, NA)))
#Get Forest (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.full$summary.random$Forest)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Upper", "Lower", "Wisconsin"), each = 110)
UP.lu = Comb.plt.df %>% filter(Region == "Upper")
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin")
UP.lu$UP.POS = 1:dim(UP.lu)[1]
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
Pred.pntsN$UP.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - UP.lu$ID)))
Pred.pntsN$UP.TmpEst = as.numeric(with(UP.lu,
Mean[match(Pred.pntsN$UP.temp,
UP.POS)]))
Pred.pntsN$LW.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Pred.pntsN$LW.temp,
LW.POS)]))
Pred.pntsN$WI.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Pred.pntsN$WI.temp,
WI.POS)]))
#Match to region
Pred.pntsN$Forest.Full = ifelse(Pred.pntsN$Region == "Upper", Pred.pntsN$UP.TmpEst,
ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,
ifelse(Pred.pntsN$Region == "Wisconsin", Pred.pntsN$WI.TmpEst, NA)))
Pred.pntsN@data = Pred.pntsN@data %>%
mutate(Pred.Full = plogis(Temp.Full + Snow.Full + Forest.Full))
zFull.pred.r = rasterize(spTransform(
Pred.pntsN,
CRS(proj4string(Z_samp))),
Z_samp,
"Pred.Full",
background = NA)
##Combined Model Predictions
ModResult = JModel.comb
Pred.pnts = Pred.pntsN
#Get RF
pLoc = cbind(Pred.pntsN@coords[,1], Pred.pntsN@coords[,2])
Ap = inla.spde.make.A(mesh, loc=pLoc)
Pred.pntsN$Comb.rf = drop(Ap %*% ModResult$summary.random$field.env$mean)
#Get Temperature
Comb.plt.df = as.data.frame(JModel.comb$summary.random$mMxTemp)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
#Look up temperature estimates (single pooled curve)
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Pred.pntsN$UP.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Pred.pntsN$cmb.TmpEst = as.numeric(with(Comb.plt.df,
Mean[match(Pred.pntsN$UP.temp,
Comb.POS)]))
#Get Snow
Comb.plt.df = as.data.frame(JModel.comb$summary.random$mSnw5yr)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
#Look up snow estimates
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Pred.pntsN$UP.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Pred.pntsN$Cmb.SnwEst = as.numeric(with(Comb.plt.df,
Mean[match(Pred.pntsN$UP.temp,
Comb.POS)]))
#Get Forest
Comb.plt.df = as.data.frame(JModel.comb$summary.random$Forest)[,1:6] #Get effect estimates (Comb model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
#Look up forest estimates
Comb.plt.df$Comb.POS = 1:dim(Comb.plt.df)[1]
Pred.pntsN$UP.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - Comb.plt.df$ID)))
Pred.pntsN$Cmb.ForEst = as.numeric(with(Comb.plt.df,
Mean[match(Pred.pntsN$UP.temp,
Comb.POS)]))
Pred.pntsN@data = Pred.pntsN@data %>%
mutate(Pred.Comb = plogis(cmb.TmpEst + Cmb.SnwEst + Cmb.ForEst))
zComb.pred.r = rasterize(spTransform(
Pred.pntsN,
CRS(proj4string(Z_samp))),
Z_samp,
"Pred.Comb",
background = NA)
##Two Region Model
ModResult = JModel.2Reg
#Update Region ID for prediction locations (MI and WI only)
MI_WI_Domain = subset(Domain, Name == "Michigan" | Name == "Wisconsin")
Pred.pnts = Pred.pntsN
MI_WI_DomainP = spTransform(MI_WI_Domain, proj4string(Pred.pnts))
Pred.pnts$nDom = over(Pred.pnts, MI_WI_DomainP)[,1]
Pred.pntsN = subset(Pred.pnts, is.na(nDom) == FALSE)
Pred.pntsN$Region = ifelse(Pred.pntsN@data$nDom == 21 & Pred.pntsN@data$UP == "FALSE", "Upper", #FALSE indicates UP
ifelse(Pred.pntsN@data$nDom == 21 & Pred.pntsN@data$UP == "TRUE", "Lower",
ifelse(Pred.pntsN@data$nDom == 48, "Wisconsin", NA)))
Pred.pntsN$Region = ifelse(Pred.pntsN$Region == "Upper" | Pred.pntsN$Region == "Wisconsin", "Wisconsin + UP", Pred.pntsN$Region)
#Get RF
pLoc = cbind(Pred.pntsN@coords[,1], Pred.pntsN@coords[,2])
Ap = inla.spde.make.A(mesh, loc=pLoc)
Pred.pntsN$Reg2.rf = drop(Ap %*% ModResult$summary.random$field.env$mean)
#Get Temperature
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$mMxTemp)[,1:6] #Get effect estimates (Full model)
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975") #Clean labels
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 3059)
#Region-specific estimates
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin + Upper")
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
#Look up Lower Temp Estimates
Pred.pntsN$LW.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Pred.pntsN$LW.temp,
LW.POS)]))
#Look up WI + UP Temp Estimates
Pred.pntsN$WI.temp = sapply(Pred.pntsN$mxTempE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Pred.pntsN$WI.temp,
WI.POS)]))
#Match to region
Pred.pntsN$Temp.Full = ifelse(Pred.pntsN$Region == "Wisconsin + UP", Pred.pntsN$WI.TmpEst,
ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst, NA))
#Get Snow Week (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$mSnw5yr)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 84)
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin + Upper")
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
Pred.pntsN$LW.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Pred.pntsN$LW.temp,
LW.POS)]))
Pred.pntsN$WI.temp = sapply(Pred.pntsN$mSnow5yrE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Pred.pntsN$WI.temp,
WI.POS)]))
#Match to region
Pred.pntsN$Snow.Full = ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,
ifelse(Pred.pntsN$Region == "Wisconsin + UP", Pred.pntsN$WI.TmpEst, NA))
#Get Forest (same process as above, overwriting a few labels)
Comb.plt.df = as.data.frame(JModel.2Reg$summary.random$Forest)[,1:6]
names(Comb.plt.df) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
Comb.plt.df$Region = rep(c("Wisconsin + Upper", "Lower"), each = 110)
LW.lu = Comb.plt.df %>% filter(Region == "Lower")
WI.lu = Comb.plt.df %>% filter(Region == "Wisconsin + Upper")
LW.lu$LW.POS = 1:dim(LW.lu)[1]
WI.lu$WI.POS = 1:dim(WI.lu)[1]
Pred.pntsN$LW.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - LW.lu$ID)))
Pred.pntsN$LW.TmpEst = as.numeric(with(LW.lu,
Mean[match(Pred.pntsN$LW.temp,
LW.POS)]))
Pred.pntsN$WI.temp = sapply(Pred.pntsN$Forest1kmE, function(x)which.min(abs(x - WI.lu$ID)))
Pred.pntsN$WI.TmpEst = as.numeric(with(WI.lu,
Mean[match(Pred.pntsN$WI.temp,
WI.POS)]))
#Match to region
Pred.pntsN$Forest.Full = ifelse(Pred.pntsN$Region == "Lower", Pred.pntsN$LW.TmpEst,
ifelse(Pred.pntsN$Region == "Wisconsin + UP", Pred.pntsN$WI.TmpEst, NA))
Pred.pntsN@data = Pred.pntsN@data %>%
mutate(Pred.Full = plogis(Temp.Full + Snow.Full + Forest.Full))
zReg2.pred.r = rasterize(spTransform(
Pred.pntsN,
CRS(proj4string(Z_samp))),
Z_samp,
"Pred.Full",
background = NA)
zPred.stk = stack(zComb.pred.r, zReg2.pred.r, zFull.pred.r)
names(zPred.stk) = c("Combined", "Reg2", "Reg3") #name the new z-extent stack
rng = seq(0, 1, 0.01)
mCols = brewer.pal(9, "YlOrBr")
cr = colorRampPalette(mCols)(n = 500)
cr = colorRampPalette(cr,
bias = 1, space = "rgb")
pred.plt = levelplot(zPred.stk, #Trial.comp,
layout = c(3,1),
margin = FALSE,
xlab = NULL,
ylab = NULL,
#main = "Occurrence Probability",
names.attr= c("Combined", "Two Region", "Three Region"),
maxpixels = 1e5,
col.regions = cr, at = rng,
colorkey = list(labels=list(cex=1.5),
space = "bottom"),
par.strip.text = list(fontface='bold', cex=1.5),
par.settings = list(axis.line = list(col = "black"),
strip.background = list(col = 'transparent'),
strip.border = list(col = 'transparent')),
scales = list(cex = 1.25)) +
latticeExtra::layer(sp.polygons(States_map, col = "black", lwd = 1)) +
latticeExtra::layer(sp.points(Fit.pnt, col = "black", pch = 19, cex = 1)) +
latticeExtra::layer({SpatialPolygonsRescale(layout.north.arrow(),
offset = c(-83, 45.5),
scale = 2)})
cExt = extent(zPred.stk)
zZext = spTransform(Zext, CRS(proj4string(Forest1km)))
zForest = crop(Forest1km, zZext)
Fit.pntP2 = spTransform(Fit.pntP, CRS(proj4string(Forest1km)))
rng = seq(0, 1090, 1)
mCols = brewer.pal(9, "YlGn")
cr = colorRampPalette(mCols)(n = 2000)
fplt = levelplot(zForest,
margin = FALSE,
xlab = NULL,
ylab = NULL,
#main = "Occurrence Probability",
maxpixels = 1e5,
col.regions = cr, at = rng,
colorkey = list(labels=list(cex=1.5),
space = "bottom"),
par.strip.text = list(fontface='bold', cex=1.5),
par.settings = list(axis.line = list(col = "black"),
strip.background = list(col = 'transparent'),
strip.border = list(col = 'transparent')),
scales = list(cex = 1.25)) +
latticeExtra::layer(sp.points(Fit.pntP2, col = "red", pch = 19, cex = 1)) +
#latticeExtra::layer(sp.polygons(Hare.pnt , col = "black", pch=2, cex = 0.25)) +
latticeExtra::layer({SpatialPolygonsRescale(layout.north.arrow(),
offset = c(-83, 45.5),
scale = 2)})
fplt
pred.plt