Set WD
setwd("D:/Marten2")
Load Libraries
Needed.packages = c("rgdal", "raster", "corrplot", "spdep",
"rgeos","maptools","mapproj","GISTools",
"sp","ggplot2","ggthemes","plyr","dplyr",
"kableExtra")
for(p in Needed.packages){
if(!require(p, character.only = TRUE)) install.packages(p)
suppressMessages(library(p, character.only = TRUE))
}
## Loading required package: rgdal
## Loading required package: sp
## rgdal: version: 1.4-3, (SVN revision 828)
## Geospatial Data Abstraction Library extensions to R successfully loaded
## Loaded GDAL runtime: GDAL 2.2.3, released 2017/11/20
## Path to GDAL shared files: C:/Program Files/R/R-3.5.3/library/rgdal/gdal
## GDAL binary built with GEOS: TRUE
## Loaded PROJ.4 runtime: Rel. 4.9.3, 15 August 2016, [PJ_VERSION: 493]
## Path to PROJ.4 shared files: C:/Program Files/R/R-3.5.3/library/rgdal/proj
## Linking to sp version: 1.3-1
## Loading required package: raster
## Loading required package: corrplot
## corrplot 0.84 loaded
## Loading required package: spdep
## Loading required package: Matrix
## Loading required package: spData
## To access larger datasets in this package, install the spDataLarge
## package with: `install.packages('spDataLarge',
## repos='https://nowosad.github.io/drat/', type='source')`
## Loading required package: sf
## Linking to GEOS 3.6.1, GDAL 2.2.3, PROJ 4.9.3
## Loading required package: rgeos
## rgeos version: 0.4-2, (SVN revision 581)
## GEOS runtime version: 3.6.1-CAPI-1.10.1
## Linking to sp version: 1.3-1
## Polygon checking: TRUE
## Loading required package: maptools
## Checking rgeos availability: TRUE
## Loading required package: mapproj
## Loading required package: maps
## Loading required package: GISTools
## Loading required package: RColorBrewer
## Loading required package: MASS
##
## Attaching package: 'MASS'
## The following objects are masked from 'package:raster':
##
## area, select
##
## Attaching package: 'GISTools'
## The following object is masked from 'package:maps':
##
## map.scale
## Loading required package: ggplot2
## Loading required package: ggthemes
## Loading required package: plyr
##
## Attaching package: 'plyr'
## The following object is masked from 'package:maps':
##
## ozone
## Loading required package: dplyr
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:plyr':
##
## arrange, count, desc, failwith, id, mutate, rename, summarise,
## summarize
## The following object is masked from 'package:MASS':
##
## select
## The following objects are masked from 'package:rgeos':
##
## intersect, setdiff, union
## The following objects are masked from 'package:raster':
##
## intersect, select, union
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
## Loading required package: kableExtra
##
## Attaching package: 'kableExtra'
## The following object is masked from 'package:dplyr':
##
## group_rows
#INLA is not on CRAN; install from the R-INLA repository
if(!require("INLA", character.only = TRUE)) install.packages("INLA", repos=c(getOption("repos"), INLA="https://inla.r-inla-download.org/R/stable"), dep=TRUE)
## Loading required package: INLA
## This is INLA_18.07.12 built 2018-07-12 11:05:18 UTC.
## See www.r-inla.org/contact-us for how to get help.
suppressMessages(library("INLA", character.only = TRUE))
Load Data
#martenscl <- read.csv("Marten_Scale.csv", header=T)
martenscl = read.csv("Marten_Modeling.csv", header=T)
head(martenscl)
## FID_1 Field1 GMT_Time Duration DOP Satellites id Longitude Latitude
## 1 0 26816 4 6.0 4 5516m 658223.8 5094504
## 2 1 26916 4 7.2 4 5516m 657887.5 5094635
## 3 2 26716 68 4.8 4 5516m 658432.3 5094552
## 4 7 21121 35 3.2 4 5516m 656835.5 5095063
## 5 3 6947 25 7.6 4 5516m 656981.3 5094983
## 6 4 11666 23 7.0 4 5516m 657008.5 5095027
## RASTERVALU Area Area_MC90 Area_HO90 Area_YC90 Area_HO120
## 1 0.69615316 0.45248816 0.00000 0 0.0000 0
## 2 0.53853881 0.99558746 2053.13217 0 0.0000 0
## 3 0.74680561 1.00000000 0.00000 0 0.0000 0
## 4 0.04130336 0.08082647 97.77849 0 240.2753 0
## 5 0.05860979 0.94163814 0.00000 0 0.0000 0
## 6 0.04371836 0.94163814 0.00000 0 0.0000 0
## Area_YC120 Area_MC120 Area_HO150 Area_YC150 Area_MC150 Dist_River
## 1 0.0000 855.1882 1040.88852 0.000 0.0000 934.0391
## 2 0.0000 1887.5735 93.76815 0.000 3487.1855 774.2151
## 3 0.0000 0.0000 0.00000 0.000 0.0000 872.2185
## 4 652.9674 616.2176 0.00000 1618.857 248.5517 338.2444
## 5 0.0000 0.0000 0.00000 0.000 0.0000 385.0158
## 6 0.0000 0.0000 0.00000 0.000 0.0000 435.7748
## Dist_Paved Dist_Unpav Area_AC90 Area_AH90 Area_AM90 Area_AC120
## 1 429.95079 3013.808 0.0000 191.5467 0 855.1882
## 2 330.92218 2684.197 2053.1315 144.6394 0 2062.2441
## 3 597.21109 3135.752 0.0000 0.0000 0 0.0000
## 4 15.92333 1719.551 502.5996 690.6044 0 1446.2554
## 5 42.97241 1859.207 0.0000 0.0000 0 0.0000
## 6 93.98086 1835.544 0.0000 0.0000 0 0.0000
## Area_AH120 Area_AM120 Area_AC150 Area_AH150 Area_AM150 Area_YH90
## 1 367.6649 0.0000 0.000 0.000 0.0000 0
## 2 1970.0284 191.5466 3602.865 1718.269 191.5466 0
## 3 0.0000 0.0000 0.000 0.000 0.0000 0
## 4 2889.3007 0.0000 2189.179 9165.870 0.0000 0
## 5 188.1937 0.0000 0.000 1556.017 0.0000 0
## 6 188.1937 0.0000 0.000 1556.017 0.0000 0
## Area_MH90 Area_MH120 Area_YH120 Area_MH150 Area_YH150
## 1 191.5467 367.6649 0 0.000 0
## 2 144.6394 1970.0284 0 1718.269 0
## 3 0.0000 0.0000 0 0.000 0
## 4 690.6044 2889.3007 0 9165.867 0
## 5 0.0000 188.1937 0 1556.017 0
## 6 0.0000 188.1937 0 1556.017 0
length(unique(martenscl$id))
## [1] 13
unique(martenscl$id)
## [1] 5516m 6977m 5529m 8217m houdini 5509m 5517m 1114m
## [9] 5538m 7004m 2254m 5496m 5064f
## 13 Levels: 1114m 2254m 5064f 5496m 5509m 5516m 5517m 5529m 5538m ... houdini
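It also helps to see how many GPS fixes each individual contributes; a quick sketch (not part of the original workflow):
sort(table(martenscl$id), decreasing = TRUE) #number of locations per marten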
Scale and Center
Cov.scale = martenscl[,12:38]
head(Cov.scale)
## Area_MC90 Area_HO90 Area_YC90 Area_HO120 Area_YC120 Area_MC120
## 1 0.00000 0 0.0000 0 0.0000 855.1882
## 2 2053.13217 0 0.0000 0 0.0000 1887.5735
## 3 0.00000 0 0.0000 0 0.0000 0.0000
## 4 97.77849 0 240.2753 0 652.9674 616.2176
## 5 0.00000 0 0.0000 0 0.0000 0.0000
## 6 0.00000 0 0.0000 0 0.0000 0.0000
## Area_HO150 Area_YC150 Area_MC150 Dist_River Dist_Paved Dist_Unpav
## 1 1040.88852 0.000 0.0000 934.0391 429.95079 3013.808
## 2 93.76815 0.000 3487.1855 774.2151 330.92218 2684.197
## 3 0.00000 0.000 0.0000 872.2185 597.21109 3135.752
## 4 0.00000 1618.857 248.5517 338.2444 15.92333 1719.551
## 5 0.00000 0.000 0.0000 385.0158 42.97241 1859.207
## 6 0.00000 0.000 0.0000 435.7748 93.98086 1835.544
## Area_AC90 Area_AH90 Area_AM90 Area_AC120 Area_AH120 Area_AM120
## 1 0.0000 191.5467 0 855.1882 367.6649 0.0000
## 2 2053.1315 144.6394 0 2062.2441 1970.0284 191.5466
## 3 0.0000 0.0000 0 0.0000 0.0000 0.0000
## 4 502.5996 690.6044 0 1446.2554 2889.3007 0.0000
## 5 0.0000 0.0000 0 0.0000 188.1937 0.0000
## 6 0.0000 0.0000 0 0.0000 188.1937 0.0000
## Area_AC150 Area_AH150 Area_AM150 Area_YH90 Area_MH90 Area_MH120
## 1 0.000 0.000 0.0000 0 191.5467 367.6649
## 2 3602.865 1718.269 191.5466 0 144.6394 1970.0284
## 3 0.000 0.000 0.0000 0 0.0000 0.0000
## 4 2189.179 9165.870 0.0000 0 690.6044 2889.3007
## 5 0.000 1556.017 0.0000 0 0.0000 188.1937
## 6 0.000 1556.017 0.0000 0 0.0000 188.1937
## Area_YH120 Area_MH150 Area_YH150
## 1 0 0.000 0
## 2 0 1718.269 0
## 3 0 0.000 0
## 4 0 9165.867 0
## 5 0 1556.017 0
## 6 0 1556.017 0
for(i in 1:ncol(Cov.scale)){
Cov.scale[,i] = as.numeric(scale(Cov.scale[,i], scale = T, center = T))
}
names(Cov.scale) = paste("s_", names(Cov.scale), sep="")
#Add scaled version to data frame with an "s" in front of column name
Mod.cov.df = cbind(martenscl, Cov.scale)
head(Mod.cov.df)
## FID_1 Field1 GMT_Time Duration DOP Satellites id Longitude Latitude
## 1 0 26816 4 6.0 4 5516m 658223.8 5094504
## 2 1 26916 4 7.2 4 5516m 657887.5 5094635
## 3 2 26716 68 4.8 4 5516m 658432.3 5094552
## 4 7 21121 35 3.2 4 5516m 656835.5 5095063
## 5 3 6947 25 7.6 4 5516m 656981.3 5094983
## 6 4 11666 23 7.0 4 5516m 657008.5 5095027
## RASTERVALU Area Area_MC90 Area_HO90 Area_YC90 Area_HO120
## 1 0.69615316 0.45248816 0.00000 0 0.0000 0
## 2 0.53853881 0.99558746 2053.13217 0 0.0000 0
## 3 0.74680561 1.00000000 0.00000 0 0.0000 0
## 4 0.04130336 0.08082647 97.77849 0 240.2753 0
## 5 0.05860979 0.94163814 0.00000 0 0.0000 0
## 6 0.04371836 0.94163814 0.00000 0 0.0000 0
## Area_YC120 Area_MC120 Area_HO150 Area_YC150 Area_MC150 Dist_River
## 1 0.0000 855.1882 1040.88852 0.000 0.0000 934.0391
## 2 0.0000 1887.5735 93.76815 0.000 3487.1855 774.2151
## 3 0.0000 0.0000 0.00000 0.000 0.0000 872.2185
## 4 652.9674 616.2176 0.00000 1618.857 248.5517 338.2444
## 5 0.0000 0.0000 0.00000 0.000 0.0000 385.0158
## 6 0.0000 0.0000 0.00000 0.000 0.0000 435.7748
## Dist_Paved Dist_Unpav Area_AC90 Area_AH90 Area_AM90 Area_AC120
## 1 429.95079 3013.808 0.0000 191.5467 0 855.1882
## 2 330.92218 2684.197 2053.1315 144.6394 0 2062.2441
## 3 597.21109 3135.752 0.0000 0.0000 0 0.0000
## 4 15.92333 1719.551 502.5996 690.6044 0 1446.2554
## 5 42.97241 1859.207 0.0000 0.0000 0 0.0000
## 6 93.98086 1835.544 0.0000 0.0000 0 0.0000
## Area_AH120 Area_AM120 Area_AC150 Area_AH150 Area_AM150 Area_YH90
## 1 367.6649 0.0000 0.000 0.000 0.0000 0
## 2 1970.0284 191.5466 3602.865 1718.269 191.5466 0
## 3 0.0000 0.0000 0.000 0.000 0.0000 0
## 4 2889.3007 0.0000 2189.179 9165.870 0.0000 0
## 5 188.1937 0.0000 0.000 1556.017 0.0000 0
## 6 188.1937 0.0000 0.000 1556.017 0.0000 0
## Area_MH90 Area_MH120 Area_YH120 Area_MH150 Area_YH150 s_Area_MC90
## 1 191.5467 367.6649 0 0.000 0 -0.78497282
## 2 144.6394 1970.0284 0 1718.269 0 -0.08858507
## 3 0.0000 0.0000 0 0.000 0 -0.78497282
## 4 690.6044 2889.3007 0 9165.867 0 -0.75180801
## 5 0.0000 188.1937 0 1556.017 0 -0.78497282
## 6 0.0000 188.1937 0 1556.017 0 -0.78497282
## s_Area_HO90 s_Area_YC90 s_Area_HO120 s_Area_YC120 s_Area_MC120
## 1 -0.2371896 -0.24641407 -0.2452936 -0.26433977 -0.6440978
## 2 -0.2371896 -0.24641407 -0.2452936 -0.26433977 -0.4395535
## 3 -0.2371896 -0.24641407 -0.2452936 -0.26433977 -0.8135344
## 4 -0.2371896 -0.03464424 -0.2452936 0.04624022 -0.6914446
## 5 -0.2371896 -0.24641407 -0.2452936 -0.26433977 -0.8135344
## 6 -0.2371896 -0.24641407 -0.2452936 -0.26433977 -0.8135344
## s_Area_HO150 s_Area_YC150 s_Area_MC150 s_Dist_River s_Dist_Paved
## 1 0.1037318 -0.2862721 -0.8300625 0.13305576 -0.1447414
## 2 -0.2391546 -0.2862721 -0.3716857 -0.05482433 -0.3842057
## 3 -0.2731015 -0.2862721 -0.8300625 0.06038296 0.2597161
## 4 -0.2731015 0.2204708 -0.7973914 -0.56732685 -1.1459146
## 5 -0.2731015 -0.2862721 -0.8300625 -0.51234499 -1.0805064
## 6 -0.2731015 -0.2862721 -0.8300625 -0.45267552 -0.9571612
## s_Dist_Unpav s_Area_AC90 s_Area_AH90 s_Area_AM90 s_Area_AC120
## 1 -0.5577997 -0.8543658 -0.5822854 -0.1746501 -0.7359240
## 2 -0.6314959 -0.2097858 -0.5978496 -0.1746501 -0.5177023
## 3 -0.5305347 -0.8543658 -0.6458419 -0.1746501 -0.8905321
## 4 -0.8471771 -0.6965748 -0.4166948 -0.1746501 -0.6290659
## 5 -0.8159519 -0.8543658 -0.6458419 -0.1746501 -0.8905321
## 6 -0.8212426 -0.8543658 -0.6458419 -0.1746501 -0.8905321
## s_Area_AH120 s_Area_AM120 s_Area_AC150 s_Area_AH150 s_Area_AM150
## 1 -0.57875018 -0.2199738 -0.9036811 -0.6643799 -0.2181024
## 2 -0.27224899 0.1767426 -0.4768438 -0.4498260 0.1010682
## 3 -0.64907738 -0.2199738 -0.9036811 -0.6643799 -0.2181024
## 4 -0.09640995 -0.2199738 -0.6443254 0.4801286 -0.2181024
## 5 -0.61307956 -0.2199738 -0.9036811 -0.4700858 -0.2181024
## 6 -0.61307956 -0.2199738 -0.9036811 -0.4700858 -0.2181024
## s_Area_YH90 s_Area_MH90 s_Area_MH120 s_Area_YH120 s_Area_MH150
## 1 -0.131986 -0.5571650 -0.54922616 -0.1477767 -0.6322799
## 2 -0.131986 -0.5729332 -0.23938257 -0.1477767 -0.4153491
## 3 -0.131986 -0.6215547 -0.62032028 -0.1477767 -0.6322799
## 4 -0.131986 -0.3894034 -0.06162601 -0.1477767 0.5249071
## 5 -0.131986 -0.6215547 -0.58392989 -0.1477767 -0.4358334
## 6 -0.131986 -0.6215547 -0.58392989 -0.1477767 -0.4358334
## s_Area_YH150
## 1 -0.1640364
## 2 -0.1640364
## 3 -0.1640364
## 4 -0.1640364
## 5 -0.1640364
## 6 -0.1640364
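As a quick sanity check (a sketch), the scaled columns should now have mean ~0 and standard deviation ~1:
round(colMeans(Cov.scale), 6) #means should be ~0
round(apply(Cov.scale, 2, sd), 6) #standard deviations should be ~1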
View Correlation Table
Cov.cor = cor(Mod.cov.df[,39:65])
corrplot(Cov.cor,
tl.cex=0.7,
number.cex = 0.6,
method = "number")
Convert to Spatial Points
nProj = "+proj=utm +zone=16 +datum=NAD83 +units=m +no_defs +ellps=GRS80 +towgs84=0,0,0"
Marten.pnts = SpatialPointsDataFrame(Mod.cov.df[,c("Longitude", "Latitude")], Mod.cov.df)
proj4string(Marten.pnts) = nProj
Load County Boundaries
Counties = readOGR(dsn = "./Counties_v17a",
layer = "Counties_v17a",
stringsAsFactors = FALSE)
## OGR data source with driver: ESRI Shapefile
## Source: "D:\Marten2\Counties_v17a", layer: "Counties_v17a"
## with 83 features
## It has 15 fields
## Integer64 fields read as strings: OBJECTID FIPSNUM
Counties = spTransform(Counties, proj4string(Marten.pnts))
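Note that rgdal::readOGR has since been retired; an equivalent read-and-transform with sf (already loaded as a spdep dependency) would look roughly like this sketch, assuming EPSG:26916 is the intended NAD83 / UTM zone 16N projection:
#sf alternative (sketch)
Counties_sf = sf::st_read(dsn = "./Counties_v17a", layer = "Counties_v17a")
Counties_sf = sf::st_transform(Counties_sf, crs = 26916) #NAD83 / UTM zone 16N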
Plot Points
wmap_df = fortify(Counties)
## Regions defined for each Polygons
tmp.df = Marten.pnts@data
ggplot(wmap_df, aes(long,lat, group=group)) +
geom_polygon(fill = "lightgray", col="darkgray") +
geom_point(data=tmp.df,
aes(Longitude, Latitude,
group=NULL,
fill=NULL),
col = "red") + #colour set outside aes() so points are drawn red
xlab("Longitude") +
ylab("Latitude") +
ggtitle("Marten Locations") +
coord_equal() +
theme(panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.background = element_blank(),
panel.border = element_blank(),
legend.title = element_blank(),
legend.position = "none",
axis.title.x = element_text(size=22, face="bold"),
axis.title.y = element_text(size=22, face="bold"),
plot.title = element_text(size=24, face="bold", hjust = 0.5))
Calculate Nearest Neighbor Distance by Marten ID
library(spatstat)
## Loading required package: spatstat.data
## Loading required package: nlme
##
## Attaching package: 'nlme'
## The following object is masked from 'package:dplyr':
##
## collapse
## The following object is masked from 'package:raster':
##
## getData
## Loading required package: rpart
##
## spatstat 1.59-0 (nickname: 'J'ai omis les oeufs de caille')
## For an introduction to spatstat, type 'beginner'
##
## Note: spatstat version 1.59-0 is out of date by more than 4 months; we recommend upgrading to the latest version.
##
## Attaching package: 'spatstat'
## The following object is masked from 'package:MASS':
##
## area
## The following objects are masked from 'package:raster':
##
## area, rotate, shift
Mart.ids = levels(as.factor(Marten.pnts$id))
for(i in 1:length(Mart.ids)){ #for all marten IDs
Mart.tmp = subset(Marten.pnts, id == Mart.ids[i]) #choose one marten at a time
P.pp = as.ppp(Mart.tmp) #convert to ppp object
Mart.tmp@data$NN = as.numeric(nndist(P.pp, k = 1)) #find nearest neighboring point
if(i == 1){Marten.df = Mart.tmp@data #re-assemble data frame
} else{Marten.df = rbind(Marten.df, Mart.tmp@data)}
}
#large range
range(Marten.df$NN)
## [1] 0.000 1984.527
#Scale neighbor distance
Marten.df$NNs = Marten.df$NN/100
#Convert back to points
Marten.pnts = SpatialPointsDataFrame(Marten.df[,c("Longitude", "Latitude")], Marten.df)
proj4string(Marten.pnts) = nProj
detach("package:spatstat", unload=TRUE)
Loop through each covariate one at a time, with marten ID as a grouping random effect and nearest-neighbor distance (within each individual marten) included to account for spatial sampling bias.
Covariates.list = levels(factor(names(Marten.pnts@data[,39:65]))) #Get names of all covariates
Marten.data = Marten.pnts@data #copy of the data
for(i in 1:length(Covariates.list)){ #for each covariate
Cov.lst = list(list(intercept1 = rep(1, dim(Marten.data)[1])), #intercept = 1
list(Covariate.X = Marten.data[,Covariates.list[i]], #Choose 1 covariate at a time
NN = Marten.data[,"NNs"], #Scaled neighbor distance
ID = Marten.data[,"id"])) #Marten ID
Data.stk = inla.stack(data = list( #Organizing data as a list
RASTERVALU = Marten.data$RASTERVALU), #Response/dependent variable
A = list(1,1), #Identity projector for each effect block (no mesh/projection matrix needed)
effects = Cov.lst, #Individual covariate, neighbor, and ID
tag = "M1.0") #Just a label
nPrior = list(theta=list(prior = "normal", param=c(0, 5))) #normal prior on the hyperparameter (mean 0, precision 5)
Formula = RASTERVALU ~ -1 + intercept1 + #response and intercept
f(ID, #grouping effect for individual marten
model="iid",
constr = TRUE, #enforce a zero mean (average)
hyper = nPrior) +
f(inla.group(NN, #distance as a smooth effect
n = 20, #binning distances because some distances are very close
method = "cut"),
model="rw1",
constr = TRUE,
scale.model = TRUE,
hyper = nPrior ) +
Covariate.X #Individual covariate of interest
Model.X = inla(Formula, #Run model iteration
data = inla.stack.data(Data.stk),
family = "gamma",
verbose = TRUE,
control.fixed = list(prec = 0.001,
prec.intercept = 0.0001),
control.predictor = list(
A = inla.stack.A(Data.stk),
compute = TRUE,
link = 1),
control.inla = list(strategy="gaussian",
int.strategy = "eb"),
control.results = list(return.marginals.random = TRUE,
return.marginals.predictor = TRUE),
control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE))
#Create a data frame to store results
Fixed.effects = Model.X$summary.fixed[,c(1:3,5)]
names(Fixed.effects) = c("Mean", "sd", "Q.025", "Q.975")
#Add comparison metrics DIC and WAIC
Fixed.effects$DIC = Model.X$dic$dic
Fixed.effects$WAIC = Model.X$waic$waic
#Add label to identify actual covariate name
Fixed.effects$Covariate = Covariates.list[i]
#Get estimates for ID effect
ID.effect = Model.X$summary.random$ID[,c(1:4,6)]
names(ID.effect) = c("ID", "Mean", "sd", "Q.025", "Q.975")
ID.effect$Covariate = Covariates.list[i]
#Get estimates for NN distance effect
NN.effect = Model.X$summary.random$`inla.group(NN, n = 20, method = "cut")`[,c(1:4,6)]
names(NN.effect) = c("Distance", "Mean", "sd", "Q.025", "Q.975")
NN.effect$Covariate = Covariates.list[i]
#Combine data for all iterations
if(i == 1){
Fixed.effects.all = Fixed.effects
ID.effect.all = ID.effect
NN.effect.all = NN.effect
} else{
Fixed.effects.all = rbind(Fixed.effects.all, Fixed.effects)
ID.effect.all = rbind(ID.effect.all, ID.effect)
NN.effect.all = rbind(NN.effect.all, NN.effect)}
}
#Determine which are significant as judged by the credible interval ("Inside" = interval excludes zero)
Fixed.effects.all$Credible = ifelse(Fixed.effects.all$Q.025 >= 0 & Fixed.effects.all$Q.975 >= 0, "Inside",
ifelse(Fixed.effects.all$Q.025 <= 0 & Fixed.effects.all$Q.975 <= 0, "Inside",
"Outside"))
#Are row values the covariate or intercept?
Fixed.effects.all$Type = paste(substr(rownames(Fixed.effects.all), 1, 3))
#Save copies to WD
write.csv(Fixed.effects.all, "./Ind_fixed_effects.csv") #fixed effects
write.csv(ID.effect.all, "./Ind_ID_effect.csv")
write.csv(NN.effect.all, "./Ind_Distance_effect.csv")
Covariate Selection
Within each covariate family, s_Area_AC150, s_Area_AH120, s_Area_AM150, s_Area_HO150, s_Area_MC150, s_Area_MH120, s_Dist_Paved, s_Dist_River, and s_Dist_Unpav have the lowest DIC and WAIC, with good agreement between the two metrics.
#Keep covariate rows whose credible interval excludes zero; drop intercept rows
Fixed.effects.all2 = Fixed.effects.all %>% filter(Credible == "Inside" & Type == "Cov")
#How many dropped
dim(Fixed.effects.all)[1] - dim(Fixed.effects.all2)[1]
## [1] 35
Fixed.effects.all2
## Mean sd Q.025 Q.975 DIC WAIC
## 1 0.15151964 0.02381457 0.10476361 0.19823666 -10597.65 -10596.15
## 2 0.21259406 0.02383549 0.16579696 0.25935211 -10637.00 -10635.44
## 3 0.14896285 0.02339773 0.10302521 0.19486215 -10597.86 -10596.54
## 4 0.29216339 0.01871683 0.25541595 0.32888017 -10811.40 -10806.61
## 5 0.22598206 0.01926155 0.18816514 0.26376742 -10699.66 -10696.42
## 6 0.25074475 0.01802432 0.21535693 0.28610304 -10759.10 -10754.85
## 7 0.17055522 0.01601689 0.13910867 0.20197553 -10716.96 -10713.26
## 8 0.19723367 0.01632722 0.16517783 0.22926276 -10786.47 -10781.38
## 9 0.12933730 0.01666453 0.09661921 0.16202808 -10645.77 -10643.13
## 10 -0.07105576 0.01671303 -0.10386908 -0.03826984 -10574.52 -10574.06
## 11 0.12295161 0.02055672 0.08259184 0.16327770 -10593.46 -10592.05
## 12 0.16227102 0.02049147 0.12203936 0.20246910 -10621.22 -10619.79
## 13 0.12218299 0.02025961 0.08240654 0.16192625 -10594.10 -10592.87
## 14 0.29273587 0.01884944 0.25572806 0.32971279 -10810.33 -10805.50
## 15 0.22903845 0.01945820 0.19083544 0.26720958 -10701.63 -10698.57
## 16 0.25093796 0.01810795 0.21538594 0.28646031 -10758.35 -10754.02
## 17 0.33701837 0.01935856 0.29901100 0.37499402 -10880.06 -10876.48
## 18 -0.56410421 0.03439929 -0.63164162 -0.49662316 -10829.23 -10827.35
## 19 -0.77738058 0.06432073 -0.90366385 -0.65120269 -10713.25 -10710.75
## Covariate Credible Type
## 1 s_Area_AC120 Inside Cov
## 2 s_Area_AC150 Inside Cov
## 3 s_Area_AC90 Inside Cov
## 4 s_Area_AH120 Inside Cov
## 5 s_Area_AH150 Inside Cov
## 6 s_Area_AH90 Inside Cov
## 7 s_Area_AM120 Inside Cov
## 8 s_Area_AM150 Inside Cov
## 9 s_Area_AM90 Inside Cov
## 10 s_Area_HO150 Inside Cov
## 11 s_Area_MC120 Inside Cov
## 12 s_Area_MC150 Inside Cov
## 13 s_Area_MC90 Inside Cov
## 14 s_Area_MH120 Inside Cov
## 15 s_Area_MH150 Inside Cov
## 16 s_Area_MH90 Inside Cov
## 17 s_Dist_Paved Inside Cov
## 18 s_Dist_River Inside Cov
## 19 s_Dist_Unpav Inside Cov
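The per-family winners can also be pulled out programmatically; the sketch below assumes the trailing 90/120/150 in each Covariate name encodes the spatial scale (the distance covariates form their own one-member families):
#Lowest-DIC covariate within each family, among those with credible effects
Fixed.effects.all2 %>%
mutate(Family = gsub("[0-9]+$", "", Covariate)) %>% #strip trailing scale
group_by(Family) %>%
filter(DIC == min(DIC)) %>%
ungroup() %>%
dplyr::select(Family, Covariate, DIC, WAIC) %>%
arrange(DIC)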
Check correlation between selected covariates
Best.Cov = Marten.pnts@data %>%
dplyr::select(s_Area_AC150, s_Area_AH120, s_Area_AM150, s_Area_HO150,
s_Area_MC150, s_Area_MH120, s_Dist_Paved,
s_Dist_River, s_Dist_Unpav)
Cov.cor2 = cor(Best.Cov)
corrplot(Cov.cor2,
tl.cex=0.7,
number.cex = 0.6,
method = "number")
#The AC, AH, MC, and MH variables look highly correlated. Let's test with a different method:
library(perturb)
##
## Attaching package: 'perturb'
## The following object is masked from 'package:raster':
##
## reclassify
CI = colldiag(Cov.cor2)
CI #Last value in the Index column needs to be less than 30; it's still too high
## Condition
## Index Variance Decomposition Proportions
## intercept s_Area_AC150 s_Area_AH120 s_Area_AM150 s_Area_HO150
## 1 1.000 0.000 0.000 0.000 0.001 0.000
## 2 1.266 0.007 0.000 0.000 0.015 0.000
## 3 1.488 0.001 0.000 0.000 0.010 0.033
## 4 2.061 0.000 0.001 0.000 0.163 0.046
## 5 2.295 0.000 0.000 0.000 0.024 0.033
## 6 2.495 0.002 0.000 0.000 0.125 0.025
## 7 4.312 0.002 0.003 0.000 0.002 0.034
## 8 28.337 0.005 0.759 0.000 0.000 0.018
## 9 170.395 0.983 0.237 1.000 0.659 0.810
## s_Area_MC150 s_Area_MH120 s_Dist_Paved s_Dist_River s_Dist_Unpav
## 1 0.001 0.000 0.003 0.010 0.004
## 2 0.000 0.000 0.012 0.015 0.002
## 3 0.000 0.000 0.008 0.014 0.088
## 4 0.001 0.000 0.028 0.000 0.010
## 5 0.000 0.000 0.104 0.016 0.315
## 6 0.000 0.000 0.122 0.145 0.001
## 7 0.006 0.000 0.103 0.649 0.322
## 8 0.956 0.000 0.008 0.042 0.181
## 9 0.037 1.000 0.612 0.108 0.077
#dropping s_Area_MH120 and re-assessing
Best.Cov2 = Best.Cov %>% dplyr::select(-"s_Area_MH120")
Cov.cor3 = cor(Best.Cov2)
CI = colldiag(Cov.cor3)
CI #Now less than 30
## Condition
## Index Variance Decomposition Proportions
## intercept s_Area_AC150 s_Area_AH120 s_Area_AM150 s_Area_HO150
## 1 1.000 0.052 0.001 0.015 0.013 0.001
## 2 1.291 0.444 0.000 0.017 0.007 0.046
## 3 1.510 0.022 0.000 0.095 0.122 0.150
## 4 1.940 0.023 0.001 0.009 0.363 0.301
## 5 2.156 0.001 0.000 0.003 0.088 0.253
## 6 2.483 0.170 0.000 0.143 0.372 0.027
## 7 4.326 0.253 0.004 0.671 0.025 0.165
## 8 26.111 0.035 0.994 0.048 0.010 0.057
## s_Area_MC150 s_Dist_Paved s_Dist_River s_Dist_Unpav
## 1 0.001 0.002 0.023 0.004
## 2 0.000 0.078 0.000 0.053
## 3 0.000 0.010 0.016 0.048
## 4 0.001 0.127 0.001 0.005
## 5 0.000 0.177 0.025 0.353
## 6 0.000 0.236 0.228 0.017
## 7 0.006 0.334 0.677 0.363
## 8 0.992 0.036 0.030 0.157
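As an alternative cross-check (a sketch), variance inflation factors can be read directly off the diagonal of the inverted correlation matrix; values below roughly 5-10 suggest the remaining collinearity is tolerable:
round(diag(solve(Cov.cor3)), 2) #VIF for each retained covariate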
Organize data
Marten.data = Marten.pnts@data
Global.lst = list(list(intercept1 = rep(1, dim(Marten.data)[1])),
list(s_Area_AC150 = Marten.data[,"s_Area_AC150"],
s_Area_AH120 = Marten.data[,"s_Area_AH120"],
s_Area_AM150 = Marten.data[,"s_Area_AM150"],
s_Area_HO150 = Marten.data[,"s_Area_HO150"],
s_Area_MC150 = Marten.data[,"s_Area_MC150"],
s_Dist_Paved = Marten.data[,"s_Dist_Paved"],
s_Dist_River = Marten.data[,"s_Dist_River"],
s_Dist_Unpav = Marten.data[,"s_Dist_Unpav"],
NN = round(Marten.data[,"NNs"],3),
ID = Marten.data[,"id"]))
Global.stk = inla.stack(data = list(RASTERVALU = Marten.data$RASTERVALU),
A = list(1,1),
effects = Global.lst,
tag = "G1.0")
Run Global Model 1
nPrior = list(theta=list(prior = "normal", param=c(0, 5))) #normal prior on the hyperparameter (mean 0, precision 5)
Formula = RASTERVALU ~ -1 + intercept1 + #response and intercept
f(ID, #grouping effect for individual marten
model="iid",
constr = TRUE,
hyper = nPrior) +
f(inla.group(NN, #distance as a smooth effect
n = 20, #
method = "cut"),
model="rw1",
constr = TRUE,
scale.model = TRUE,
hyper = nPrior ) +
s_Area_AC150 + s_Area_AH120 + s_Area_AM150 +
s_Area_HO150 + s_Area_MC150 + s_Dist_Paved + s_Dist_River +
s_Dist_Unpav
GModel.1 = inla(Formula,
data = inla.stack.data(Global.stk),
family = "gamma",
verbose = TRUE,
control.fixed = list(prec = 0.001,
prec.intercept = 0.0001),
control.predictor = list(
A = inla.stack.A(Global.stk),
compute = TRUE,
link = 1),
control.inla = list(strategy="gaussian",
int.strategy = "eb"),
control.results = list(return.marginals.random = TRUE,
return.marginals.predictor = TRUE),
control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE))
summary(GModel.1)
##
## Call:
## c("inla(formula = Formula, family = \"gamma\", data = inla.stack.data(Global.stk), ", " verbose = TRUE, control.compute = list(dic = TRUE, cpo = TRUE, ", " waic = TRUE), control.predictor = list(A = inla.stack.A(Global.stk), ", " compute = TRUE, link = 1), control.inla = list(strategy = \"gaussian\", ", " int.strategy = \"eb\"), control.results = list(return.marginals.random = TRUE, ", " return.marginals.predictor = TRUE), control.fixed = list(prec = 0.001, ", " prec.intercept = 1e-04))")
##
## Time used:
## Pre-processing Running inla Post-processing Total
## 2.5292 29.7904 1.0193 33.3388
##
## Fixed effects:
## mean sd 0.025quant 0.5quant 0.975quant mode kld
## intercept1 0.4033 0.2080 -0.0051 0.4033 0.8113 0.4033 0
## s_Area_AC150 0.4159 0.0445 0.3285 0.4159 0.5033 0.4159 0
## s_Area_AH120 0.4485 0.0194 0.4105 0.4485 0.4865 0.4485 0
## s_Area_AM150 0.1585 0.0148 0.1294 0.1585 0.1875 0.1585 0
## s_Area_HO150 0.0123 0.0173 -0.0217 0.0123 0.0463 0.0123 0
## s_Area_MC150 0.0270 0.0381 -0.0478 0.0270 0.1017 0.0270 0
## s_Dist_Paved 0.3056 0.0189 0.2684 0.3056 0.3427 0.3056 0
## s_Dist_River -0.5638 0.0332 -0.6290 -0.5638 -0.4986 -0.5638 0
## s_Dist_Unpav -1.2449 0.0620 -1.3667 -1.2449 -1.1232 -1.2449 0
##
## Random effects:
## Name Model
## ID IID model
## inla.group(NN, n = 20, method = "cut") RW1 model
##
## Model hyperparameters:
## mean sd
## Precision parameter for the Gamma observations 0.6271 0.0082
## Precision for ID 0.5023 0.1345
## Precision for inla.group(NN, n = 20, method = "cut") 1.1510 0.4336
## 0.025quant 0.5quant
## Precision parameter for the Gamma observations 0.6111 0.6271
## Precision for ID 0.2754 0.4910
## Precision for inla.group(NN, n = 20, method = "cut") 0.5253 1.0775
## 0.975quant mode
## Precision parameter for the Gamma observations 0.6432 0.6271
## Precision for ID 0.7989 0.4693
## Precision for inla.group(NN, n = 20, method = "cut") 2.2062 0.9445
##
## Expected number of effective parameters(std dev): 26.56(0.00)
## Number of equivalent replicates : 317.37
##
## Deviance Information Criterion (DIC) ...............: -12217.39
## Deviance Information Criterion (DIC, saturated) ....: -1.078e+141
## Effective number of parameters .....................: 26.98
##
## Watanabe-Akaike information criterion (WAIC) ...: -12205.28
## Effective number of parameters .................: 36.45
##
## Marginal log-Likelihood: 5998.03
## CPO and PIT are computed
##
## Posterior marginals for linear predictor and fitted values computed
Run Global Model 2, dropping s_Area_HO150 and s_Area_MC150 because their credible intervals overlapped zero in Global Model 1.
nPrior = list(theta=list(prior = "normal", param=c(0, 5))) #normal prior on the hyperparameter (mean 0, precision 5)
Formula = RASTERVALU ~ -1 + intercept1 +
f(ID,
model="iid",
constr = TRUE,
hyper = nPrior) +
f(inla.group(NN,
n = 20, #
method = "cut"),
model="rw1",
constr = TRUE,
scale.model = TRUE,
hyper = nPrior ) +
s_Area_AC150 + s_Area_AH120 + s_Area_AM150 +
s_Dist_Paved + s_Dist_River +
s_Dist_Unpav
GModel.2 = inla(Formula,
data = inla.stack.data(Global.stk),
family = "gamma",
verbose = TRUE,
control.fixed = list(prec = 0.001,
prec.intercept = 0.0001),
control.predictor = list(
A = inla.stack.A(Global.stk),
compute = TRUE,
link = 1),
control.inla = list(strategy="gaussian",
int.strategy = "eb"),
control.results = list(return.marginals.random = TRUE,
return.marginals.predictor = TRUE),
control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE))
summary(GModel.2)
##
## Call:
## c("inla(formula = Formula, family = \"gamma\", data = inla.stack.data(Global.stk), ", " verbose = TRUE, control.compute = list(dic = TRUE, cpo = TRUE, ", " waic = TRUE), control.predictor = list(A = inla.stack.A(Global.stk), ", " compute = TRUE, link = 1), control.inla = list(strategy = \"gaussian\", ", " int.strategy = \"eb\"), control.results = list(return.marginals.random = TRUE, ", " return.marginals.predictor = TRUE), control.fixed = list(prec = 0.001, ", " prec.intercept = 1e-04))")
##
## Time used:
## Pre-processing Running inla Post-processing Total
## 2.4285 59.1723 0.9804 62.5811
##
## Fixed effects:
## mean sd 0.025quant 0.5quant 0.975quant mode kld
## intercept1 0.4125 0.2229 -0.0252 0.4125 0.8498 0.4125 0
## s_Area_AC150 0.4379 0.0264 0.3860 0.4379 0.4897 0.4379 0
## s_Area_AH120 0.4468 0.0204 0.4068 0.4468 0.4868 0.4468 0
## s_Area_AM150 0.1592 0.0157 0.1284 0.1592 0.1899 0.1592 0
## s_Dist_Paved 0.3052 0.0201 0.2658 0.3052 0.3445 0.3052 0
## s_Dist_River -0.5600 0.0349 -0.6285 -0.5600 -0.4915 -0.5600 0
## s_Dist_Unpav -1.2601 0.0651 -1.3879 -1.2601 -1.1323 -1.2601 0
##
## Random effects:
## Name Model
## ID IID model
## inla.group(NN, n = 20, method = "cut") RW1 model
##
## Model hyperparameters:
## mean sd
## Precision parameter for the Gamma observations 0.5619 0.0053
## Precision for ID 0.5476 0.2506
## Precision for inla.group(NN, n = 20, method = "cut") 1.3430 0.8756
## 0.025quant 0.5quant
## Precision parameter for the Gamma observations 0.5538 0.5611
## Precision for ID 0.2703 0.4802
## Precision for inla.group(NN, n = 20, method = "cut") 0.5072 1.0860
## 0.975quant mode
## Precision parameter for the Gamma observations 0.5741 0.5581
## Precision for ID 1.2053 0.3738
## Precision for inla.group(NN, n = 20, method = "cut") 3.6701 0.7648
##
## Expected number of effective parameters(std dev): 24.75(0.00)
## Number of equivalent replicates : 340.67
##
## Deviance Information Criterion (DIC) ...............: -12138.80
## Deviance Information Criterion (DIC, saturated) ....: -2.218e+154
## Effective number of parameters .....................: 25.26
##
## Watanabe-Akaike information criterion (WAIC) ...: -12128.68
## Effective number of parameters .................: 32.38
##
## Marginal log-Likelihood: 5971.92
## CPO and PIT are computed
##
## Posterior marginals for linear predictor and fitted values computed
The second global model is preferred as the more parsimonious of the two. Here we re-run it without the spatial (NN) and grouping (ID) effects to verify that they were important.
Run Non-Spatial Model, Dropping NN.
nPrior = list(theta=list(prior = "normal", param=c(0, 5)))
Formula = RASTERVALU ~ -1 + intercept1 +
f(ID,
model="iid",
constr = TRUE,
hyper = nPrior) +
s_Area_AC150 + s_Area_AH120 + s_Area_AM150 +
s_Dist_Paved + s_Dist_River +
s_Dist_Unpav
Non.Spatial.mod = inla(Formula,
data = inla.stack.data(Global.stk),
family = "gamma",
verbose = TRUE,
control.fixed = list(prec = 0.001,
prec.intercept = 0.0001),
control.predictor = list(
A = inla.stack.A(Global.stk),
compute = TRUE,
link = 1),
control.inla = list(strategy="gaussian",
int.strategy = "eb"),
control.results = list(return.marginals.random = TRUE,
return.marginals.predictor = TRUE),
control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE))
summary(Non.Spatial.mod)
##
## Call:
## c("inla(formula = Formula, family = \"gamma\", data = inla.stack.data(Global.stk), ", " verbose = TRUE, control.compute = list(dic = TRUE, cpo = TRUE, ", " waic = TRUE), control.predictor = list(A = inla.stack.A(Global.stk), ", " compute = TRUE, link = 1), control.inla = list(strategy = \"gaussian\", ", " int.strategy = \"eb\"), control.results = list(return.marginals.random = TRUE, ", " return.marginals.predictor = TRUE), control.fixed = list(prec = 0.001, ", " prec.intercept = 1e-04))")
##
## Time used:
## Pre-processing Running inla Post-processing Total
## 1.8889 22.0770 0.9525 24.9184
##
## Fixed effects:
## mean sd 0.025quant 0.5quant 0.975quant mode kld
## intercept1 -1.2404 0.0187 -1.2771 -1.2404 -1.2037 -1.2404 0
## s_Area_AC150 0.3893 0.0248 0.3406 0.3893 0.4379 0.3893 0
## s_Area_AH120 0.4179 0.0193 0.3801 0.4179 0.4557 0.4179 0
## s_Area_AM150 0.1471 0.0151 0.1175 0.1471 0.1767 0.1471 0
## s_Dist_Paved 0.2897 0.0189 0.2525 0.2897 0.3269 0.2897 0
## s_Dist_River -0.4743 0.0320 -0.5371 -0.4743 -0.4116 -0.4743 0
## s_Dist_Unpav -1.0685 0.0599 -1.1862 -1.0685 -0.9509 -1.0685 0
##
## Random effects:
## Name Model
## ID IID model
##
## Model hyperparameters:
## mean sd 0.025quant
## Precision parameter for the Gamma observations 0.5990 0.0078 0.5839
## Precision for ID 0.6053 0.1694 0.3363
## 0.5quant 0.975quant mode
## Precision parameter for the Gamma observations 0.5990 0.6144 0.5990
## Precision for ID 0.5842 0.9977 0.5442
##
## Expected number of effective parameters(std dev): 18.96(0.00)
## Number of equivalent replicates : 444.54
##
## Deviance Information Criterion (DIC) ...............: -11680.83
## Deviance Information Criterion (DIC, saturated) ....: -2.022e+146
## Effective number of parameters .....................: 18.99
##
## Watanabe-Akaike information criterion (WAIC) ...: -11669.37
## Effective number of parameters .................: 29.52
##
## Marginal log-Likelihood: 5752.98
## CPO and PIT are computed
##
## Posterior marginals for linear predictor and fitted values computed
Run spatial model without grouping by ID effect
nPrior = list(theta=list(prior = "normal", param=c(0, 5))) #normal prior on the hyperparameter (mean 0, precision 5)
Formula = RASTERVALU ~ -1 + intercept1 +
f(inla.group(NN, #distance as a smooth effect
n = 20, #
method = "cut"),
model="rw1",
constr = TRUE,
scale.model = TRUE,
hyper = nPrior ) +
s_Area_AC150 + s_Area_AH120 + s_Area_AM150 +
s_Dist_Paved + s_Dist_River +
s_Dist_Unpav
No.ID.mod = inla(Formula,
data = inla.stack.data(Global.stk),
family = "gamma",
verbose = TRUE,
control.fixed = list(prec = 0.001,
prec.intercept = 0.0001),
control.predictor = list(
A = inla.stack.A(Global.stk),
compute = TRUE,
link = 1),
control.inla = list(strategy="gaussian",
int.strategy = "eb"),
control.results = list(return.marginals.random = TRUE,
return.marginals.predictor = TRUE),
control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE))
summary(No.ID.mod)
##
## Call:
## c("inla(formula = Formula, family = \"gamma\", data = inla.stack.data(Global.stk), ", " verbose = TRUE, control.compute = list(dic = TRUE, cpo = TRUE, ", " waic = TRUE), control.predictor = list(A = inla.stack.A(Global.stk), ", " compute = TRUE, link = 1), control.inla = list(strategy = \"gaussian\", ", " int.strategy = \"eb\"), control.results = list(return.marginals.random = TRUE, ", " return.marginals.predictor = TRUE), control.fixed = list(prec = 0.001, ", " prec.intercept = 1e-04))")
##
## Time used:
## Pre-processing Running inla Post-processing Total
## 2.0675 24.8117 0.9624 27.8416
##
## Fixed effects:
## mean sd 0.025quant 0.5quant 0.975quant mode kld
## intercept1 -0.0577 0.2288 -0.5070 -0.0577 0.3912 -0.0577 0
## s_Area_AC150 0.6227 0.0199 0.5836 0.6227 0.6617 0.6227 0
## s_Area_AH120 0.4119 0.0185 0.3755 0.4119 0.4483 0.4119 0
## s_Area_AM150 0.1355 0.0164 0.1034 0.1355 0.1676 0.1355 0
## s_Dist_Paved 0.3188 0.0180 0.2835 0.3188 0.3541 0.3188 0
## s_Dist_River -0.0747 0.0155 -0.1051 -0.0747 -0.0443 -0.0747 0
## s_Dist_Unpav 0.3463 0.0166 0.3137 0.3463 0.3789 0.3463 0
##
## Random effects:
## Name Model
## inla.group(NN, n = 20, method = "cut") RW1 model
##
## Model hyperparameters:
## mean sd
## Precision parameter for the Gamma observations 0.5183 0.0066
## Precision for inla.group(NN, n = 20, method = "cut") 1.3355 0.5262
## 0.025quant 0.5quant
## Precision parameter for the Gamma observations 0.5054 0.5183
## Precision for inla.group(NN, n = 20, method = "cut") 0.5777 1.2465
## 0.975quant mode
## Precision parameter for the Gamma observations 0.5315 0.5182
## Precision for inla.group(NN, n = 20, method = "cut") 2.6147 1.0829
##
## Expected number of effective parameters(std dev): 12.19(0.00)
## Number of equivalent replicates : 691.79
##
## Deviance Information Criterion (DIC) ...............: -9932.80
## Deviance Information Criterion (DIC, saturated) ....: -2.262e+162
## Effective number of parameters .....................: 12.59
##
## Watanabe-Akaike information criterion (WAIC) ...: -9935.53
## Effective number of parameters .................: 8.748
##
## Marginal log-Likelihood: 4905.00
## CPO and PIT are computed
##
## Posterior marginals for linear predictor and fitted values computed
Run non-spatial model without grouping by ID effect (just a linear model)
nPrior = list(theta=list(prior = "normal", param=c(0, 5)))
Formula = RASTERVALU ~ -1 + intercept1 +
s_Area_AC150 + s_Area_AH120 + s_Area_AM150 +
s_Dist_Paved + s_Dist_River +
s_Dist_Unpav
No.Space.ID.mod = inla(Formula,
data = inla.stack.data(Global.stk),
family = "gamma",
verbose = TRUE,
control.fixed = list(prec = 0.001,
prec.intercept = 0.0001),
control.predictor = list(
A = inla.stack.A(Global.stk),
compute = TRUE,
link = 1),
control.inla = list(strategy="gaussian",
int.strategy = "eb"),
control.results = list(return.marginals.random = TRUE,
return.marginals.predictor = TRUE),
control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE))
summary(No.Space.ID.mod)
##
## Call:
## c("inla(formula = Formula, family = \"gamma\", data = inla.stack.data(Global.stk), ", " verbose = TRUE, control.compute = list(dic = TRUE, cpo = TRUE, ", " waic = TRUE), control.predictor = list(A = inla.stack.A(Global.stk), ", " compute = TRUE, link = 1), control.inla = list(strategy = \"gaussian\", ", " int.strategy = \"eb\"), control.results = list(return.marginals.random = TRUE, ", " return.marginals.predictor = TRUE), control.fixed = list(prec = 0.001, ", " prec.intercept = 1e-04))")
##
## Time used:
## Pre-processing Running inla Post-processing Total
## 1.7473 16.8731 1.0532 19.6736
##
## Fixed effects:
## mean sd 0.025quant 0.5quant 0.975quant mode kld
## intercept1 -1.3695 0.0155 -1.3999 -1.3695 -1.3391 -1.3695 0
## s_Area_AC150 0.6087 0.0199 0.5696 0.6087 0.6479 0.6087 0
## s_Area_AH120 0.4067 0.0186 0.3701 0.4067 0.4433 0.4067 0
## s_Area_AM150 0.1304 0.0165 0.0979 0.1304 0.1628 0.1304 0
## s_Dist_Paved 0.2993 0.0179 0.2641 0.2993 0.3345 0.2993 0
## s_Dist_River -0.0756 0.0158 -0.1067 -0.0756 -0.0445 -0.0756 0
## s_Dist_Unpav 0.3228 0.0167 0.2900 0.3228 0.3555 0.3228 0
##
## The model has no random effects
##
## Model hyperparameters:
## mean sd 0.025quant
## Precision parameter for the Gamma observations 0.5074 0.0065 0.4948
## 0.5quant 0.975quant mode
## Precision parameter for the Gamma observations 0.5074 0.5202 0.5073
##
## Expected number of effective parameters(std dev): 7.027(0.00)
## Number of equivalent replicates : 1199.57
##
## Deviance Information Criterion (DIC) ...............: -9676.89
## Deviance Information Criterion (DIC, saturated) ....: -3.126e+164
## Effective number of parameters .....................: 7.035
##
## Watanabe-Akaike information criterion (WAIC) ...: -9678.53
## Effective number of parameters .................: 5.38
##
## Marginal log-Likelihood: 4783.82
## CPO and PIT are computed
##
## Posterior marginals for linear predictor and fitted values computed
Compare Model Metrics. The two global models are comparable, though dropping the two non-significant covariates increases both the DIC and WAIC slightly. The linear model, with no spatial or grouping effects, performs the worst.
Models = c("Global 1", "Global 2", "Non-Spatial",
"No ID", "Linear Model")
#Deviance information criteria
S1.DICs = c(GModel.1$dic$dic, GModel.2$dic$dic, Non.Spatial.mod$dic$dic,
No.ID.mod$dic$dic, No.Space.ID.mod$dic$dic)
S1.WAICs = c(GModel.1$waic$waic, GModel.2$waic$waic, Non.Spatial.mod$waic$waic,
No.ID.mod$waic$waic, No.Space.ID.mod$waic$waic)
Model_out = as.data.frame(cbind(Model = Models,
DIC = round(S1.DICs, 2),
WAIC = round(S1.WAICs, 2)))
kable(Model_out, caption = "Model Comparison") %>%
kable_styling("striped", full_width = F) %>%
row_spec(0, font_size = 20)
Model        |       DIC |      WAIC
-------------|-----------|----------
Global 1     | -12217.39 | -12205.28
Global 2     | -12138.80 | -12128.68
Non-Spatial  | -11680.83 | -11669.37
No ID        |  -9932.80 |  -9935.53
Linear Model |  -9676.89 |  -9678.53
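For easier reading, differences from the best (lowest) DIC and WAIC can be appended to the comparison table (a sketch):
Model_out$dDIC = round(S1.DICs - min(S1.DICs), 2) #0 = best model
Model_out$dWAIC = round(S1.WAICs - min(S1.WAICs), 2)
Model_out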
Global Model 2 (GModel.2) Fixed Effect Estimates
FE.table = GModel.2$summary.fixed[,c(1:3,5)]
names(FE.table) = c("Mean", "sd", "Q0.025", "Q0.975")
kable(FE.table, caption = "Fixed Effects") %>%
kable_styling("striped", full_width = F) %>%
row_spec(0, font_size = 20)
Term         |       Mean |        sd |     Q0.025 |     Q0.975
-------------|------------|-----------|------------|-----------
intercept1   |  0.4124923 | 0.2229478 | -0.0252293 |  0.8498487
s_Area_AC150 |  0.4378669 | 0.0264420 |  0.3859524 |  0.4897381
s_Area_AH120 |  0.4468109 | 0.0203876 |  0.4067833 |  0.4868052
s_Area_AM150 |  0.1591554 | 0.0156700 |  0.1283899 |  0.1898954
s_Dist_Paved |  0.3051682 | 0.0200516 |  0.2658001 |  0.3445034
s_Dist_River | -0.5600091 | 0.0349001 | -0.6285297 | -0.4915457
s_Dist_Unpav | -1.2600674 | 0.0651103 | -1.3879008 | -1.1323407
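Because the gamma likelihood is fit with its default log link here, the fixed effects are additive on the log scale; exponentiating them (a sketch) gives the multiplicative change in expected RASTERVALU per one-SD increase in each scaled covariate:
round(exp(FE.table[, c("Mean", "Q0.025", "Q0.975")]), 3) #multiplicative effects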
ID Effect by Marten ID
mic.d = as.data.frame(GModel.2$summary.random$ID)
names(mic.d) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975", "mode", "kld")
ggplot(mic.d, aes(factor(ID), y=Mean)) +
geom_point(size=2, pch=19, col = "red") +
geom_linerange(aes(ymin=Q025, ymax=Q975), colour="black") +
geom_hline(yintercept = 0,
linetype = "solid",
col = "darkgray",
size = 0.5) +
theme_classic() +
xlab("Marten ID") +
ylab("RASTERVALU (log)") +
ggtitle("Marten ID Effect") +
theme(plot.title = element_text(hjust = 0.5),
axis.title.y = element_text(face="bold", size=18),
axis.title.x = element_text(face="bold", size=18),
title = element_text(face="bold", size=18, hjust=0.5),
strip.text.x = element_text(face="bold", size = 14, colour = "black"),
axis.text.y = element_text(face="bold", size=14),
axis.text.x = element_text(face="bold", size=14, vjust=0.5,
hjust=0.5, angle=90))
Spatial Effect (Neighbor distance)
mic.d = as.data.frame(GModel.2$summary.random$`inla.group(NN, n = 20, method = "cut")`)
names(mic.d) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975", "mode", "kld")
mic.d$ID = mic.d$ID*100 #Back to original scale, meters
ggplot(mic.d, aes(ID, Mean)) +
geom_smooth(col = "black",
linetype= "solid",
method = "loess",
span = 0.9,
se = FALSE,
lwd = 1) +
geom_smooth(data = mic.d, aes(ID, Q025),
col = "grey",
method = "loess",
span = 0.9,
se = FALSE,
linetype= "dashed") +
geom_smooth(data = mic.d, aes(ID, Q975),
col = "grey",
method = "loess",
span = 0.9,
se = FALSE,
linetype= "dashed") +
geom_hline(yintercept = 0,
linetype = "solid",
col = "darkgray",
size = 0.5) +
ylab("RASTERVALU (log)") +
xlab("Distance (meters)") +
ggtitle("Spatial Effect") +
theme_classic() +
theme(plot.title = element_text(hjust = 0.5),
title = element_text(face="bold", size=18, hjust=0.5),
axis.text=element_text(size=16),
strip.text = element_text(face="bold", size = 20),
axis.title.y = element_text(face="bold", size = 20),
axis.text.x = element_text(face="bold", size=14, vjust=0.5, angle=90),
axis.title.x = element_text(face="bold", size = 20))