Marten (Martes americana), U.P., Michigan

1 Get County Boundaries

Read county boundaries shapefile.

Counties = readOGR(dsn = "./Counties", 
                   layer = "Counties_v17a", 
                   stringsAsFactors = FALSE)
## OGR data source with driver: ESRI Shapefile 
## Source: "E:\HarvMod\HarvestModeling\Counties", layer: "Counties_v17a"
## with 83 features
## It has 15 fields
## Integer64 fields read as strings:  OBJECTID FIPSNUM
UP = subset(Counties, PENINSULA == "upper")

UP.union = gUnaryUnion(UP) #Dissolve county polygons into a single U.P. boundary


#Create a rasterized version  
Ras = raster(res = 0.01, ext = extent(UP.union),
             crs = proj4string(UP.union))

Domain.r = rasterize(UP.union, Ras, 
                     field = 0, 
                     background = NA)

#Create a point grid version
Grd.pnt = rasterToPoints(Domain.r, spatial = TRUE)

Grd.pnt@data = Grd.pnt@data %>%
                mutate(Long = Grd.pnt@coords[,1],
                       Lat = Grd.pnt@coords[,2]) %>%
                select(-layer)

2 Load Marten Harvest Data

Second data set (with age and sex)

#Mart.df = read.csv("C:/Users/Talesha Dokes/Michigan State University/Roloff, Gary - HarvestModeling/MartenDataJun2019/Roloff_marten_registrations.csv",  
#                   header = TRUE, sep=",") 

Mart.df = read.csv("./MartenDataJun2019/Roloff_marten_registrations.csv",  
                   header = TRUE, sep=",") 

#Table with PLSS and coordinates for Michigan
#PLSS = read.csv("F://MartenPhDWork//Hecology//MI_PLSS.csv",  #FYI - This file wasn't on the OneDrive
#                   header = TRUE, sep=",") 

PLSS = read.csv("./PLSS/MI_PLSS.csv",  
                   header = TRUE, sep=",") 

Mart.df$Long = with(PLSS, 
                      Long[match(
                            Mart.df$CorrectedTRSec,
                                 TWNRNGSEC)])

Mart.df$Lat = with(PLSS, 
                      Lat[match(
                            Mart.df$CorrectedTRSec,
                                 TWNRNGSEC)])
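
The two match() calls above are keyed lookups: for each record, find the row of PLSS whose TWNRNGSEC equals CorrectedTRSec and pull its coordinates. A minimal equivalent sketch using a dplyr join (assuming TWNRNGSEC is unique in PLSS; otherwise left_join would duplicate records):

#Hypothetical join-based equivalent of the match() lookups above
#Mart.df = Mart.df %>%
#            left_join(PLSS %>% select(TWNRNGSEC, Long, Lat),
#                      by = c("CorrectedTRSec" = "TWNRNGSEC"))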

Mart.df = Mart.df %>%
            mutate(Year = Season_Year,
                   #Month = month(TakeDate),
                   Age = Lab_Age,
                   Sex = Lab_Sex,
                   Spp = "Marten",
                   OBS = 1,
                   Source = "MDNR") %>%
            filter(!is.na(Year),
                   !is.na(Long),
                   !is.na(Lat)) %>%
            select(Year, Age, Sex, Spp, OBS, Long, Lat, Source)

dim(Mart.df)
## [1] 3479    8
#Count records per year
Mart.df %>%
  group_by(Year) %>%
  summarise(Count = length(Year))
## # A tibble: 19 x 2
##     Year Count
##    <int> <int>
##  1  2000    67
##  2  2001    63
##  3  2002    64
##  4  2003   114
##  5  2004   134
##  6  2005   125
##  7  2006   146
##  8  2007   233
##  9  2008   218
## 10  2009   214
## 11  2010   253
## 12  2011   178
## 13  2012   276
## 14  2013   251
## 15  2014   302
## 16  2015   223
## 17  2016    95
## 18  2017   170
## 19  2018   353
Mart.df = Mart.df %>% filter(Year < 2019) #Drop any partial-season records; no effect here (max year is 2018)

Mart.df %>%
  group_by(Year) %>%
  summarise(Count = length(Year))
## # A tibble: 19 x 2
##     Year Count
##    <int> <int>
##  1  2000    67
##  2  2001    63
##  3  2002    64
##  4  2003   114
##  5  2004   134
##  6  2005   125
##  7  2006   146
##  8  2007   233
##  9  2008   218
## 10  2009   214
## 11  2010   253
## 12  2011   178
## 13  2012   276
## 14  2013   251
## 15  2014   302
## 16  2015   223
## 17  2016    95
## 18  2017   170
## 19  2018   353
dim(Mart.df)
## [1] 3479    8

Convert to Points

Marten.pnt = SpatialPointsDataFrame(Mart.df[,c("Long", "Lat")], Mart.df)
proj4string(Marten.pnt) = proj4string(UP)

#Keep only those in the UP
Marten.pnt$UP = is.na(over(Marten.pnt, UP.union))
Marten.pnt = subset(Marten.pnt, UP == FALSE)

Quick Plot

UP_map = fortify(UP)
## Regions defined for each Polygons
tmp.df = Marten.pnt@data %>% 
            select(Long, Lat, Year)

ggplot(UP_map, aes(long,lat, group=group)) + 
        geom_polygon(fill = "tan", col="black") + 
        geom_point(data=tmp.df, 
                   aes(Long, Lat, 
                       group=NULL, fill=NULL),
                       col = "red", size=1) +
        xlab("Longitude") +
        ylab("Latitude") +
        ggtitle("Marten Harvest") +
        guides(color=guide_legend(title = "Year")) +
        facet_wrap(~Year, ncol = 3) +
        coord_equal() + 
        theme(panel.grid.minor = element_blank(),
              panel.grid.major = element_blank(),
              panel.background = element_blank(),
              plot.background = element_blank(),
              panel.border = element_blank(),
              legend.title = element_blank(),
              axis.title.x = element_text(size=16, face="bold"),
              axis.title.y = element_text(size=16, face="bold"),
              plot.title = element_text(size=20, face="bold", hjust = 0.5))

Change Projection

nProj = "+proj=utm +zone=16 +datum=NAD83 +units=km +no_defs +ellps=GRS80 +towgs84=0,0,0"

UP.prj = spTransform(UP, nProj)
UP.union.prj = spTransform(UP.union, nProj)
Marten.pnt.prj = spTransform(Marten.pnt, nProj)

Marten.pnt.prj@data = Marten.pnt.prj@data %>%
                      mutate(Longp = Marten.pnt.prj@coords[,1],
                             Latp = Marten.pnt.prj@coords[,2])

3 Construct Mesh

max.edge = 2
bound.outer = 30

bdry = inla.sp2segment(UP.union.prj)

mesh = inla.mesh.2d(boundary = bdry,
                    loc = coordinates(Marten.pnt.prj),
                    max.edge = c(1, 12)*max.edge,
                    cutoff = 5,
                    min.angle = 23,
                    offset = c(max.edge, bound.outer))

plot(mesh, lwd=0.5, main=" ", 
     rgl=F, draw.vertices = TRUE, 
     vertex.color = "blue")

mesh$n
## [1] 1984
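
As a quick visual check (a sketch, not part of the original workflow), the harvest points can be overlaid on the mesh to confirm the finer triangles track the data:

#Optional check: overlay harvest locations on the mesh
#plot(mesh, lwd = 0.5, main = "")
#points(coordinates(Marten.pnt.prj), col = "red", pch = 16, cex = 0.4)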

Get Mesh Nodes

dd = as.data.frame(cbind(mesh$loc[,1], 
                         mesh$loc[,2]))

names(dd) = c("Longp", "Latp")
dd = dd %>%
     mutate(OBS = 0,
            Source = "Mesh")

##Make a copy for each year
Year.lvls = levels(factor(Marten.pnt.prj$Year))

for(i in 1:length(Year.lvls)){
      tmp.dd = dd %>%
               mutate(Year = Year.lvls[i])
      if(i==1){dd.df = tmp.dd}
        else{dd.df = rbind(dd.df, tmp.dd)}}
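
The loop replicates the full node set once per harvest year. A one-step sketch with the same result (base merge() forms the Cartesian product when the inputs share no column names):

#Hypothetical one-step replacement for the replication loop
#dd.df = merge(dd, data.frame(Year = Year.lvls))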

#Month.lvls = levels(factor(Marten.pnt.prj$Month)) #Month was not retained above, so this is unused
Age.lvls = levels(factor(Marten.pnt.prj$Age))

dd.length = dim(dd.df)[1]

#Random values for background points
#dd.df$Month = sample(Month.lvls, dd.length, replace = T)
dd.df$Sex = sample(c("F","M"), dd.length, replace = T)
dd.df$Age = sample(Age.lvls, dd.length, replace = T)

levels(factor(dd.df$Year))
##  [1] "2000" "2001" "2002" "2003" "2004" "2005" "2006" "2007" "2008" "2009"
## [11] "2010" "2011" "2012" "2013" "2014" "2015" "2016" "2017" "2018"
head(dd.df)
##      Longp     Latp OBS Source Year Sex Age
## 1 522.7310 5050.779   0   Mesh 2000   M 6.5
## 2 700.1876 5066.286   0   Mesh 2000   M 8.5
## 3 489.1956 5057.737   0   Mesh 2000   F 2.5
## 4 677.8654 5151.228   0   Mesh 2000   F 0.5
## 5 412.3404 5201.961   0   Mesh 2000   M 1.5
## 6 656.7944 5149.811   0   Mesh 2000   F 5.5

Combine Marten and Nodes

Also convert the combined records to spatial points.

Mod.pnts = Marten.pnt.prj@data %>%
           select(Longp, Latp, OBS, Source, Year, Sex, Age)

Mod.pnts = rbind(Mod.pnts, dd.df)

Mod.pnts = SpatialPointsDataFrame(Mod.pnts[,c("Longp", "Latp")], Mod.pnts)
proj4string(Mod.pnts) = proj4string(Marten.pnt.prj)

4 Monthly PRISM Data

This will download PRISM data to the specified folder and then convert it to a raster stack, matching climate data to the months of harvest.

Max Temperature (TMAX)

library(prism) 

range(Marten.pnt$Year)
## [1] 2000 2018
#range(Marten.pnt$Month)

options(prism.path = "./PRISM/TMAX") #Where to save the data; separate folder for temperature

get_prism_monthlys(type = 'tmax', #climate variable wanted, options: "ppt", "tmean", "tmin", "tmax"
                   years=2000:2018, #Years wanted
                   mon = 10:12, #Months wanted
                   keepZip = FALSE) #Don't keep zip file
GetFiles = ls_prism_data(name=TRUE) #File names

#Count files
dim(GetFiles)
## [1] 57  2
#View (2 columns)
head(GetFiles)
##                                files
## 1 PRISM_tmax_stable_4kmM2_200010_bil
## 2 PRISM_tmax_stable_4kmM2_200011_bil
## 3 PRISM_tmax_stable_4kmM2_200012_bil
## 4 PRISM_tmax_stable_4kmM2_200110_bil
## 5 PRISM_tmax_stable_4kmM2_200111_bil
## 6 PRISM_tmax_stable_4kmM2_200112_bil
##                                       product_name
## 1 Oct  2000 - 4km resolution - Maximum temperature
## 2 Nov  2000 - 4km resolution - Maximum temperature
## 3 Dec  2000 - 4km resolution - Maximum temperature
## 4 Oct  2001 - 4km resolution - Maximum temperature
## 5 Nov  2001 - 4km resolution - Maximum temperature
## 6 Dec  2001 - 4km resolution - Maximum temperature
MyMonths = substr(GetFiles$product_name, 1, 3) #Pull month abbreviation
unique(MyMonths)
## [1] "Oct" "Nov" "Dec"
MyYear = substr(GetFiles$product_name, 6, 9) #Pull year
unique(MyYear)
##  [1] "2000" "2001" "2002" "2003" "2004" "2005" "2006" "2007" "2008" "2009"
## [11] "2010" "2011" "2012" "2013" "2014" "2015" "2016" "2017" "2018"
#stack rasters
TMAX.stk = prism_stack(ls_prism_data())

#Simplified names
names(TMAX.stk) = paste(MyMonths, MyYear, sep="_")

#Plot one
plot(TMAX.stk[[1]])

##Create TMAX Dataframe 
TMAX.df = as.data.frame(
                  extract(TMAX.stk, #Finding values for each location
                          spTransform(Mod.pnts,
                                      proj4string(TMAX.stk)),
                          method = "simple"))


#Some mesh points fall over water and extract as NA; impute with each column (month-year) mean
for(i in 1:dim(TMAX.df)[2]){
  
  TMAX.df[,i][is.na(TMAX.df[,i])] = mean(TMAX.df[,i], na.rm=TRUE)
  
}
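
The same column-mean imputation can be written without the index loop; a minimal vectorized sketch (identical behavior assumed, since every column is numeric):

#Hypothetical vectorized equivalent of the imputation loop
#TMAX.df[] = lapply(TMAX.df, function(v) replace(v, is.na(v), mean(v, na.rm = TRUE)))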

TMAX.df$RowID = 1:nrow(TMAX.df)

dim(TMAX.df)
## [1] 41145    58
head(TMAX.df)
##   Oct_2000 Nov_2000 Dec_2000 Oct_2001 Nov_2001 Dec_2001 Oct_2002 Nov_2002
## 1    15.37     3.37    -7.70    12.42     9.86     0.12     7.73     1.74
## 2    15.19     3.10    -8.01    12.25     9.63    -0.06     7.41     1.50
## 3    15.59     3.89    -7.56    11.78     8.40    -0.27     8.40     1.10
## 4    15.02     4.34    -6.17    11.04     8.54     0.68     9.03     2.13
## 5    14.73     4.89    -5.55    12.46     8.49     1.61     9.64     2.55
## 6    14.47     5.48    -5.17    11.98     7.81     2.07     9.48     2.84
##   Dec_2002 Oct_2003 Nov_2003 Dec_2003 Oct_2004 Nov_2004 Dec_2004 Oct_2005
## 1    -1.42    11.74     2.60    -0.56    12.95     5.46    -3.74    13.80
## 2    -1.71    11.53     2.33    -0.84    12.69     5.18    -4.03    13.59
## 3    -1.85    11.61     2.24    -1.20    12.00     4.31    -4.70    13.85
## 4    -0.96    11.66     3.35    -0.23    13.10     5.55    -2.85    13.68
## 5    -0.87    11.09     4.25     0.19    12.94     5.77    -2.55    13.88
## 6    -0.09    10.89     5.05     0.93    12.64     6.16    -1.75    13.84
##   Nov_2005 Dec_2005 Oct_2006 Nov_2006 Dec_2006 Oct_2007 Nov_2007 Dec_2007
## 1     3.92    -4.01     9.80     5.24    -0.48    14.84     2.58    -4.45
## 2     3.69    -4.29     9.58     5.00    -0.74    14.64     2.33    -4.74
## 3     2.97    -4.47     9.62     4.66    -0.93    14.80     2.22    -4.60
## 4     3.96    -3.38     9.87     5.51     0.18    14.99     3.24    -3.17
## 5     4.73    -3.27    10.53     5.61     0.47    15.22     4.14    -2.52
## 6     5.33    -2.49    10.19     5.74     1.26    15.14     4.19    -1.60
##   Oct_2008 Nov_2008 Dec_2008 Oct_2009 Nov_2009 Dec_2009 Oct_2010 Nov_2010
## 1    11.58     2.97    -6.27     8.22     7.65    -4.73    13.98     4.82
## 2    11.37     2.72    -6.50     7.95     7.44    -4.98    13.79     4.59
## 3    11.80     2.63    -6.31     8.06     7.17    -4.84    14.33     4.64
## 4    11.55     3.95    -4.59     7.96     8.29    -3.58    14.35     5.55
## 5    12.10     4.44    -4.12     9.09     8.00    -2.85    14.24     5.85
## 6    11.66     4.74    -3.42     8.91     8.06    -1.98    13.87     6.30
##   Dec_2010 Oct_2011 Nov_2011 Dec_2011 Oct_2012 Nov_2012 Dec_2012 Oct_2013
## 1    -4.36    14.04     5.18    -1.21    11.06     3.89    -1.40    11.83
## 2    -4.59    13.81     4.93    -1.45    10.84     3.68    -1.63    11.59
## 3    -4.19    13.92     4.38    -1.58    11.16     3.29    -1.67    11.67
## 4    -3.10    14.14     5.49    -0.80    11.23     4.15    -0.15    12.07
## 5    -2.82    14.25     6.00    -0.02    12.15     4.57     0.07    12.71
## 6    -2.27    13.91     6.76     0.70    12.22     5.21     0.70    12.58
##   Nov_2013 Dec_2013 Oct_2014 Nov_2014 Dec_2014 Oct_2015 Nov_2015 Dec_2015
## 1     2.09    -7.83    10.50    -1.01    -2.29    12.58     7.38     0.79
## 2     1.85    -8.10    10.32    -1.27    -2.53    12.39     7.14     0.54
## 3     1.68    -8.00     9.96    -1.60    -2.42    12.25     6.68     0.67
## 4     2.78    -6.53    10.04    -0.40    -1.39    12.21     7.59     2.16
## 5     3.48    -6.18    10.76     0.51    -1.13    12.57     7.78     2.75
## 6     4.01    -5.78    10.79     1.13    -0.32    12.38     8.13     3.50
##   Oct_2016 Nov_2016 Dec_2016 Oct_2017 Nov_2017 Dec_2017 Oct_2018 Nov_2018
## 1   13.203    8.715   -3.665   14.229    1.526   -5.771    7.995   -1.016
## 2   13.031    8.476   -3.932   13.993    1.285   -6.056    7.865   -1.245
## 3   12.878    8.246   -3.955   14.158    0.925   -6.052    7.732   -1.376
## 4   13.124    9.050   -2.271   14.103    2.398   -4.367    8.467    0.320
## 5   14.163    9.015   -1.810   14.475    2.823   -4.787    9.035    0.607
## 6   14.365    9.112   -0.527   14.775    3.600   -4.163    9.176    1.213
##   Dec_2018 RowID
## 1   -1.461     1
## 2   -1.669     2
## 3   -1.813     3
## 4   -0.454     4
## 5   -0.562     5
## 6    0.330     6

Find the average of October, November, and December for each year.

Melt.Temp.df = melt(TMAX.df, "RowID")
head(Melt.Temp.df) #Now in long format
##   RowID variable value
## 1     1 Oct_2000 15.37
## 2     2 Oct_2000 15.19
## 3     3 Oct_2000 15.59
## 4     4 Oct_2000 15.02
## 5     5 Oct_2000 14.73
## 6     6 Oct_2000 14.47
Melt.Temp.df$Year = substr(Melt.Temp.df$variable, 5, 9)
head(Melt.Temp.df)
##   RowID variable value Year
## 1     1 Oct_2000 15.37 2000
## 2     2 Oct_2000 15.19 2000
## 3     3 Oct_2000 15.59 2000
## 4     4 Oct_2000 15.02 2000
## 5     5 Oct_2000 14.73 2000
## 6     6 Oct_2000 14.47 2000
range(Melt.Temp.df$Year) #double checking
## [1] "2000" "2018"
TMAX.df.avg = as.data.frame(
                 Melt.Temp.df %>%
                   group_by(Year) %>% #only Oct-Dec are in dataset
                   summarise(Average = mean(value)))

head(TMAX.df.avg)
##   Year  Average
## 1 2000 4.380668
## 2 2001 7.594855
## 3 2002 3.657639
## 4 2003 5.188226
## 5 2004 5.441967
## 6 2005 5.154181
#Match to marten records by Year
Mod.pnts$TMAX = with(TMAX.df.avg, 
                        Average[match(
                          Mod.pnts$Year,
                                 Year)])

Total Precipitation (PPT)

As with TMAX.

options(prism.path = "./PRISM/PPT") #Where to save the data; separate folder for precipitation

get_prism_monthlys(type = 'ppt', #climate variable wanted, options: "ppt", "tmean", "tmin", "tmax"
                   years=2000:2018, #Years wanted
                   mon = 10:12, #Months wanted
                   keepZip = FALSE) #Don't keep zip file
GetFiles = ls_prism_data(name=TRUE) #File names

#Count files
dim(GetFiles)
## [1] 57  2
#View (2 columns)
head(GetFiles)
##                               files
## 1 PRISM_ppt_stable_4kmM3_200010_bil
## 2 PRISM_ppt_stable_4kmM3_200011_bil
## 3 PRISM_ppt_stable_4kmM3_200012_bil
## 4 PRISM_ppt_stable_4kmM3_200110_bil
## 5 PRISM_ppt_stable_4kmM3_200111_bil
## 6 PRISM_ppt_stable_4kmM3_200112_bil
##                                 product_name
## 1 Oct  2000 - 4km resolution - Precipitation
## 2 Nov  2000 - 4km resolution - Precipitation
## 3 Dec  2000 - 4km resolution - Precipitation
## 4 Oct  2001 - 4km resolution - Precipitation
## 5 Nov  2001 - 4km resolution - Precipitation
## 6 Dec  2001 - 4km resolution - Precipitation
MyMonths = substr(GetFiles$product_name, 1, 3) #Pull month abbreviation 
MyYear = substr(GetFiles$product_name, 6, 9) #Pull year

#stack rasters
PPT.stk = prism_stack(ls_prism_data())

#Simplified names
names(PPT.stk) = paste(MyMonths, MyYear, sep="_")

#Plot one
plot(PPT.stk[[1]])

##Create PPT Dataframe 
PPT.df = as.data.frame(
                  extract(PPT.stk, #Finding values for each location
                          spTransform(Mod.pnts,
                                      proj4string(PPT.stk)),
                          method = "simple"))

for(i in 1:dim(PPT.df)[2]){
  
  PPT.df[,i][is.na(PPT.df[,i])] = mean(PPT.df[,i], na.rm=TRUE)
  
}


PPT.df$RowID = 1:nrow(PPT.df)

dim(PPT.df)
## [1] 41145    58
head(PPT.df)
##   Oct_2000 Nov_2000 Dec_2000 Oct_2001 Nov_2001 Dec_2001 Oct_2002 Nov_2002
## 1    35.23    68.64    59.40    70.85    56.38    46.87   140.93    42.62
## 2    33.88    59.09    53.95    66.62    55.18    44.57   152.46    35.17
## 3    51.79    66.46    73.69    87.62    57.93    54.60   170.89    49.66
## 4    68.00    98.02    90.95    99.44    70.53    55.96   160.45    74.15
## 5    55.36    76.43    76.33   126.48    65.01    65.38   134.16    44.55
## 6    40.99    57.05    42.53   147.86    98.53    60.38    88.01    26.71
##   Dec_2002 Oct_2003 Nov_2003 Dec_2003 Oct_2004 Nov_2004 Dec_2004 Oct_2005
## 1    13.06    58.09    94.05    41.44    87.92    35.08    53.50   150.22
## 2    11.50    55.22    86.63    40.02    89.44    32.11    53.35   149.34
## 3    11.91    74.02    72.57    53.71   125.62    46.47    96.05   138.99
## 4    24.85    79.12    75.21    72.59   133.70    67.82    84.19   147.32
## 5    20.48    64.00    87.14    67.48   169.62    67.73    97.36   110.26
## 6    23.56    55.57   122.05    58.28   129.21    75.60    89.22    56.30
##   Nov_2005 Dec_2005 Oct_2006 Nov_2006 Dec_2006 Oct_2007 Nov_2007 Dec_2007
## 1    84.30    38.54    65.89    34.45    62.60   154.93    36.14    60.55
## 2    78.66    36.73    64.23    33.19    61.59   163.23    34.34    58.40
## 3    86.03    30.61    64.05    29.75    51.61   129.86    67.04    66.02
## 4   123.71    45.12    64.60    42.56    64.29   126.61    54.47    75.03
## 5   105.97    39.65    91.19   174.70    70.11   127.09    57.82    67.61
## 6   109.45    32.46    83.26    62.77    74.40   117.33    62.17    45.04
##   Oct_2008 Nov_2008 Dec_2008 Oct_2009 Nov_2009 Dec_2009 Oct_2010 Nov_2010
## 1    57.03    44.59    56.57   134.40    32.66    58.91    39.70    52.98
## 2    56.87    42.37    53.41   132.85    30.79    55.01    41.47    50.61
## 3    63.85    56.49    60.74   119.87    15.86    54.66    61.07    41.72
## 4    66.59    67.34    91.52   140.55    32.13    72.62    61.73    50.63
## 5    55.81    76.84   108.77   140.73    30.92    67.82    59.02    53.92
## 6    56.97    70.38   118.40   165.26    34.64    59.42    60.92    76.84
##   Dec_2010 Oct_2011 Nov_2011 Dec_2011 Oct_2012 Nov_2012 Dec_2012 Oct_2013
## 1    40.03    60.24    46.64    40.68   113.64    52.00    22.41    96.27
## 2    38.21    59.96    43.89    38.31   120.09    47.56    21.97    99.82
## 3    54.41    73.26    45.26    38.97   111.51    43.41    28.53    85.61
## 4    86.02    65.07    53.99    47.83   125.61    49.51    40.47    90.69
## 5    49.52    82.14    69.66    48.54   133.84    46.74    49.51   100.94
## 6    41.93    69.42    87.50    41.08   120.32    26.88    59.42    78.06
##   Nov_2013 Dec_2013 Oct_2014 Nov_2014 Dec_2014 Oct_2015 Nov_2015 Dec_2015
## 1    93.79    44.86   115.52   101.75    58.89    60.81    88.71   99.479
## 2    89.88    41.66   111.69    99.34    55.18    65.16    85.55   99.671
## 3    80.38    32.39   112.27    80.46    50.61    64.44    60.07   87.878
## 4   120.79    60.37   165.89   117.36    58.13    71.53    89.21  125.580
## 5   176.53    87.10   174.43   148.20    58.09    61.57   115.53  133.260
## 6   173.93    63.04   229.87   127.88    64.02    90.23   142.60  151.865
##   Oct_2016 Nov_2016 Dec_2016 Oct_2017 Nov_2017 Dec_2017 Oct_2018 Nov_2018
## 1   61.122   40.455   64.623  168.823   58.181   55.480  174.724   59.695
## 2   59.744   42.982   59.779  166.483   53.320   52.893  171.800   54.773
## 3   81.642   37.667   47.970  153.757   47.921   51.701  156.593   61.696
## 4  178.877   49.675   64.092  168.305   46.266   72.738  168.485   66.063
## 5  117.555   65.505   88.331  173.269   63.590  122.602  202.388   67.014
## 6   87.207   53.003   74.624  173.178   69.833   80.728  213.381   82.058
##   Dec_2018 RowID
## 1   38.523     1
## 2   34.709     2
## 3   41.682     3
## 4   51.381     4
## 5   43.737     5
## 6   46.274     6

Find the average of October, November, and December for each year.

Melt.PPT.df = melt(PPT.df, "RowID")
head(Melt.PPT.df) #Now in long format
##   RowID variable value
## 1     1 Oct_2000 35.23
## 2     2 Oct_2000 33.88
## 3     3 Oct_2000 51.79
## 4     4 Oct_2000 68.00
## 5     5 Oct_2000 55.36
## 6     6 Oct_2000 40.99
Melt.PPT.df$Year = substr(Melt.PPT.df$variable, 5, 9)
head(Melt.PPT.df)
##   RowID variable value Year
## 1     1 Oct_2000 35.23 2000
## 2     2 Oct_2000 33.88 2000
## 3     3 Oct_2000 51.79 2000
## 4     4 Oct_2000 68.00 2000
## 5     5 Oct_2000 55.36 2000
## 6     6 Oct_2000 40.99 2000
range(Melt.PPT.df$Year) #double checking
## [1] "2000" "2018"
PPT.df.avg = as.data.frame(
                 Melt.PPT.df %>%
                   group_by(Year) %>% #only Oct-Dec are in dataset
                   summarise(Average = mean(value)))

head(PPT.df.avg)
##   Year  Average
## 1 2000 56.86325
## 2 2001 76.81729
## 3 2002 61.61163
## 4 2003 60.23360
## 5 2004 83.30350
## 6 2005 79.25578
#Match to marten records by Year
Mod.pnts$PPT = with(PPT.df.avg, 
                        Average[match(
                          Mod.pnts$Year,
                                 Year)])

range(Mod.pnts$PPT)
## [1]  51.65295 100.51448

5 Sum of Road Lengths

#suppressMessages(library(spatstat))
#Rds = readOGR(dsn = "F://MartenPhDWork//Hecology//Allroads", 
#                layer = "output", 
#                stringsAsFactors = FALSE)

#Rds = spTransform(Rds, nProj) #convert projection

#Road.psp = as.psp(Rds) #format roads
#load("F://MartenPhDWork//Hecology//Rds.RData")
loc.road = raster("./Roads/locrdssecsum")

maj.road = raster("./Roads/majrdssecsum")
#M.pp = as.ppp(Mod.pnts) #format marten points

#Distance from each point to nearest road
#Mod.pnts$nRoad = nncross(M.pp, Road.psp)[,"dist"]


Mod.pnts$loc = as.numeric(
                  extract(loc.road, #Finding values for each location
                          spTransform(Mod.pnts,
                                      proj4string(loc.road)),
                          method = "simple"))

Mod.pnts$loc[Mod.pnts$loc < 0] = NA
Mod.pnts$loc[is.na(Mod.pnts$loc)] = 0 #these aren't random (mostly mesh points over water)

Mod.pnts$maj = as.numeric(
                  extract(maj.road, #Finding values for each location
                          spTransform(Mod.pnts,
                                      proj4string(maj.road)),
                          method = "simple"))

Mod.pnts$maj[Mod.pnts$maj < 0] = NA
Mod.pnts$maj[is.na(Mod.pnts$maj)] = 0

6 LandFire

LANDFIRE layers are factor-type attributes, not continuous.

VDISTURB:
1. Extract data.
2. Create a “Look.up” value based on the closest matching year.
3. Match individual records to the available LANDFIRE years.

#Load the data
VDISTURB <- list.files(path="./LANDFIRE/VDISTURB", pattern = "secmaj$", full.names = TRUE)
VDISTURB.LF <- raster::stack(VDISTURB)

#Extract to a data frame
VDISTURB.df = as.data.frame(
                extract(VDISTURB.LF, 
                        spTransform(Mod.pnts, 
                                    proj4string(VDISTURB.LF)),
                        method = "simple"))


head(VDISTURB.df)
##   v2008secmaj v2010secmaj v2012secmaj v2014secmaj
## 1           0           0           0           0
## 2           0           0           0           0
## 3           0           0           0           0
## 4           0           0           0           0
## 5           0           0           0           0
## 6           0           0           0           0
#create year column
VDISTURB.mlt = melt(VDISTURB.df)
## No id variables; using all as measure variables
VDISTURB.mlt$Year = substr(VDISTURB.mlt$variable, 2, 5)
head(VDISTURB.mlt)
##      variable value Year
## 1 v2008secmaj     0 2008
## 2 v2008secmaj     0 2008
## 3 v2008secmaj     0 2008
## 4 v2008secmaj     0 2008
## 5 v2008secmaj     0 2008
## 6 v2008secmaj     0 2008
Available.yrs = as.numeric(unique(VDISTURB.mlt$Year)) #ID available years in dataset, convert to number
Available.yrs
## [1] 2008 2010 2012 2014
Mod.pnts$Year = as.integer(Mod.pnts$Year) #Convert to number

Close.match = function(x){which.min(abs(x - Available.yrs))} #position of the available year with the smallest absolute difference

#Test (demo)
Close.match(2010) #test with 2010 that is available
## [1] 2
Available.yrs[Close.match(2010)]
## [1] 2010
Close.match(2011) #test with 2011 (not available)
## [1] 2
Available.yrs[Close.match(2011)]
## [1] 2010
Close.match(2013) #test with 2013 (not available)
## [1] 3
Available.yrs[Close.match(2013)]
## [1] 2012
Mod.pnts$Look.up = Available.yrs[sapply(Mod.pnts$Year, Close.match)]

head(Mod.pnts[,c("Year","Look.up")]) #2014 is most recent
##   Year Look.up
## 1 2017    2014
## 2 2017    2014
## 3 2017    2014
## 4 2017    2014
## 5 2017    2014
## 6 2017    2014
tail(Mod.pnts[,c("Year","Look.up")])
##       Year Look.up
## 41140 2018    2014
## 41141 2018    2014
## 41142 2018    2014
## 41143 2018    2014
## 41144 2018    2014
## 41145 2018    2014
#Assign column based on Look.up year
Mod.pnts$VDISTURB = ifelse(Mod.pnts$Look.up == 2014, VDISTURB.df$v2014secmaj,
                      ifelse(Mod.pnts$Look.up == 2008, VDISTURB.df$v2008secmaj,
                          ifelse(Mod.pnts$Look.up == 2010, VDISTURB.df$v2010secmaj,
                            ifelse(Mod.pnts$Look.up == 2012, VDISTURB.df$v2012secmaj,
                                  NA))))

Mod.pnts$VDISTURB[is.na(Mod.pnts$VDISTURB)] = 0
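
The nested ifelse() grows with every added LANDFIRE year. A compact sketch of the same lookup (a hypothetical alternative: build each record's column name from Look.up, then use two-column matrix indexing, which data frames support):

#Hypothetical alternative to the nested ifelse()
#col.idx = match(paste0("v", Mod.pnts$Look.up, "secmaj"), names(VDISTURB.df))
#Mod.pnts$VDISTURB = VDISTURB.df[cbind(seq_len(nrow(VDISTURB.df)), col.idx)]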

CBD

#Load the data
CBD <- list.files(path="./LANDFIRE/CBD", 
                  pattern = "secmaj$", full.names = TRUE)
CBD.LF <- raster::stack(CBD)

#Extract to a data frame
CBD.df = as.data.frame(
                extract(CBD.LF, 
                        spTransform(Mod.pnts, 
                                    proj4string(CBD.LF)),
                        method = "simple"))


head(CBD.df)
##   cbd2001secmaj cbd2008secmaj cbd2010secmaj cbd2012secmaj cbd2014secmaj
## 1             1             1             1             1             1
## 2             1             1             1             1             1
## 3             1             1             1             1             1
## 4             1             1             1             1             1
## 5             1             1             1             1             1
## 6             1             1             1             1             1
#create year column
CBD.mlt = melt(CBD.df)
## No id variables; using all as measure variables
CBD.mlt$Year = substr(CBD.mlt$variable, 4, 7)
head(CBD.mlt)
##        variable value Year
## 1 cbd2001secmaj     1 2001
## 2 cbd2001secmaj     1 2001
## 3 cbd2001secmaj     1 2001
## 4 cbd2001secmaj     1 2001
## 5 cbd2001secmaj     1 2001
## 6 cbd2001secmaj     1 2001
Available.yrs = as.numeric(unique(CBD.mlt$Year)) #ID available years in dataset, convert to number
Available.yrs
## [1] 2001 2008 2010 2012 2014
Mod.pnts$Year = as.integer(Mod.pnts$Year) #Convert to number

Mod.pnts$Look.up = Available.yrs[sapply(Mod.pnts$Year, Close.match)] #Overwrite prior look up year

#Assign column based on Look.up year
Mod.pnts$CBD = ifelse(Mod.pnts$Look.up == 2014, CBD.df$cbd2014secmaj,
                      ifelse(Mod.pnts$Look.up == 2001, CBD.df$cbd2001secmaj,
                          ifelse(Mod.pnts$Look.up == 2010, CBD.df$cbd2010secmaj,
                            ifelse(Mod.pnts$Look.up == 2012, CBD.df$cbd2012secmaj,
                                ifelse(Mod.pnts$Look.up == 2008, CBD.df$cbd2008secmaj,
                                  NA)))))

Mod.pnts$CBD[is.na(Mod.pnts$CBD)] = 0

EVC

#Load the data
EVC <- list.files(path="./LANDFIRE/EVC", 
                  pattern = "secmaj$", full.names = TRUE)
EVC.LF <- raster::stack(EVC)

#Extract to a data frame
EVC.df = as.data.frame(
                extract(EVC.LF, 
                        spTransform(Mod.pnts, 
                                    proj4string(EVC.LF)),
                        method = "simple"))


head(EVC.df)
##   evc2001secmaj evc2008secmaj evc2010secmaj evc2012secmaj evc2014secmaj
## 1           108           108           108           108           107
## 2           108           107           107           107           107
## 3           107           107           107           107           107
## 4           107           107           107           107           107
## 5           108           108           108           107           107
## 6           106           107           107           107           107
#create year column
EVC.mlt = melt(EVC.df)
## No id variables; using all as measure variables
EVC.mlt$Year = substr(EVC.mlt$variable, 4, 7)
head(EVC.mlt)
##        variable value Year
## 1 evc2001secmaj   108 2001
## 2 evc2001secmaj   108 2001
## 3 evc2001secmaj   107 2001
## 4 evc2001secmaj   107 2001
## 5 evc2001secmaj   108 2001
## 6 evc2001secmaj   106 2001
Available.yrs = as.numeric(unique(EVC.mlt$Year)) #ID available years in dataset, convert to number
Available.yrs
## [1] 2001 2008 2010 2012 2014
Mod.pnts$Year = as.integer(Mod.pnts$Year) #Convert to number

Mod.pnts$Look.up = Available.yrs[sapply(Mod.pnts$Year, Close.match)] #Overwrite prior look up year

#Assign column based on Look.up year
Mod.pnts$EVC = ifelse(Mod.pnts$Look.up == 2014, EVC.df$evc2014secmaj,
                      ifelse(Mod.pnts$Look.up == 2001, EVC.df$evc2001secmaj,
                          ifelse(Mod.pnts$Look.up == 2010, EVC.df$evc2010secmaj,
                            ifelse(Mod.pnts$Look.up == 2012, EVC.df$evc2012secmaj,
                                ifelse(Mod.pnts$Look.up == 2008, EVC.df$evc2008secmaj,
                                  NA)))))

Mod.pnts$EVC[is.na(Mod.pnts$EVC)] = 0
Mod.pnts$EVC[Mod.pnts$EVC < 0] = 0

EVH

#Load the data
EVH <- list.files(path="./LANDFIRE/EVH", pattern = "secmaj$", full.names = TRUE)
EVH.LF <- raster::stack(EVH)


#Extract to a data frame
EVH.df = as.data.frame(
                extract(EVH.LF, 
                        spTransform(Mod.pnts, 
                                    proj4string(EVH.LF)),
                        method = "simple"))


head(EVH.df)
##   evh2001secmaj evh2008secmaj evh2010secmaj evh2012secmaj
## 1           110           110           110           110
## 2           110           110           110           110
## 3           110           110           110           110
## 4           110           110           110           110
## 5           110           110           110           110
## 6           110           110           110           110
#create year column
EVH.mlt = melt(EVH.df)
## No id variables; using all as measure variables
EVH.mlt$Year = substr(EVH.mlt$variable, 4, 7)
head(EVH.mlt)
##        variable value Year
## 1 evh2001secmaj   110 2001
## 2 evh2001secmaj   110 2001
## 3 evh2001secmaj   110 2001
## 4 evh2001secmaj   110 2001
## 5 evh2001secmaj   110 2001
## 6 evh2001secmaj   110 2001
Available.yrs = as.numeric(unique(EVH.mlt$Year)) #ID available years in dataset, convert to number
Available.yrs
## [1] 2001 2008 2010 2012
Mod.pnts$Year = as.integer(Mod.pnts$Year) #Convert to number

Mod.pnts$Look.up = Available.yrs[sapply(Mod.pnts$Year, Close.match)] #Overwrite prior look up year

#Assign column based on Look.up year
Mod.pnts$EVH = ifelse(Mod.pnts$Look.up == 2001, EVH.df$evh2001secmaj,
                          ifelse(Mod.pnts$Look.up == 2010, EVH.df$evh2010secmaj,
                            ifelse(Mod.pnts$Look.up == 2012, EVH.df$evh2012secmaj,
                                ifelse(Mod.pnts$Look.up == 2008, EVH.df$evh2008secmaj,
                                  NA))))

Mod.pnts$EVH[is.na(Mod.pnts$EVH)] = 0

EVT

#Load the data
EVT <- list.files(path="./LANDFIRE/EVT", pattern = "secmaj$", full.names = TRUE)
EVT.LF <- raster::stack(EVT)


#Extract to a data frame
EVT.df = as.data.frame(
                extract(EVT.LF, 
                        spTransform(Mod.pnts, 
                                    proj4string(EVT.LF)),
                        method = "simple"))


head(EVT.df)
##   evt2001secmaj evt2008secmaj evt2010secmaj evt2012secmaj evt2014secmaj
## 1          2302          2302          3302          3302          3302
## 2          2302          2302          3302          3302          3302
## 3          2302          2302          3302          3302          3302
## 4          2302          2302          3302          3302          3302
## 5          2302          2302          3302          3302          3302
## 6          2302          2302          3302          3302          3302
#create year column
EVT.mlt = melt(EVT.df)
## No id variables; using all as measure variables
EVT.mlt$Year = substr(EVT.mlt$variable, 4, 7)
head(EVT.mlt)
##        variable value Year
## 1 evt2001secmaj  2302 2001
## 2 evt2001secmaj  2302 2001
## 3 evt2001secmaj  2302 2001
## 4 evt2001secmaj  2302 2001
## 5 evt2001secmaj  2302 2001
## 6 evt2001secmaj  2302 2001
Available.yrs = as.numeric(unique(EVT.mlt$Year)) #ID available years in dataset, convert to number
Available.yrs
## [1] 2001 2008 2010 2012 2014
Mod.pnts$Year = as.integer(Mod.pnts$Year) #Convert to number

Mod.pnts$Look.up = Available.yrs[sapply(Mod.pnts$Year, Close.match)] #Overwrite prior look up year

#Assign column based on Look.up year
Mod.pnts$EVT = ifelse(Mod.pnts$Look.up == 2014, EVT.df$evt2014secmaj,
                      ifelse(Mod.pnts$Look.up == 2001, EVT.df$evt2001secmaj,
                          ifelse(Mod.pnts$Look.up == 2010, EVT.df$evt2010secmaj,
                            ifelse(Mod.pnts$Look.up == 2012, EVT.df$evt2012secmaj,
                                ifelse(Mod.pnts$Look.up == 2008, EVT.df$evt2008secmaj,
                                  NA)))))

Mod.pnts$EVT[is.na(Mod.pnts$EVT)] = 0

7 Data Scaling

Tmax.sc = scale(Mod.pnts$TMAX, scale=T, center=T)
Mod.pnts$TMAX.sc = as.numeric(Tmax.sc)

Ppt.sc = scale(Mod.pnts$PPT, scale=T, center=T)
Mod.pnts$PPT.sc = as.numeric(Ppt.sc)

loc.sc = scale(Mod.pnts$loc, scale=T, center = T)
Mod.pnts$loc.sc = as.numeric(loc.sc)

maj.sc = scale(Mod.pnts$maj, scale=T, center = T)
Mod.pnts$maj.sc = as.numeric(maj.sc)
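
scale() keeps the centering and scaling constants as attributes, which is what allows covariate effects or predictions to be mapped back to the original units later. For example (a sketch, output not shown):

#attr(Tmax.sc, "scaled:center") #mean of TMAX used for centering
#attr(Tmax.sc, "scaled:scale")  #sd of TMAX used for scaling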

Check Data

Chk.frm = as.data.frame(
              Mod.pnts@data %>%
                group_by(Year, Source, OBS) %>%
                summarise(Count = length(Year)))
Chk.frm
##    Year Source OBS Count
## 1  2000   MDNR   1    67
## 2  2000   Mesh   0  1984
## 3  2001   MDNR   1    63
## 4  2001   Mesh   0  1984
## 5  2002   MDNR   1    63
## 6  2002   Mesh   0  1984
## 7  2003   MDNR   1   113
## 8  2003   Mesh   0  1984
## 9  2004   MDNR   1   134
## 10 2004   Mesh   0  1984
## 11 2005   MDNR   1   125
## 12 2005   Mesh   0  1984
## 13 2006   MDNR   1   144
## 14 2006   Mesh   0  1984
## 15 2007   MDNR   1   231
## 16 2007   Mesh   0  1984
## 17 2008   MDNR   1   216
## 18 2008   Mesh   0  1984
## 19 2009   MDNR   1   214
## 20 2009   Mesh   0  1984
## 21 2010   MDNR   1   253
## 22 2010   Mesh   0  1984
## 23 2011   MDNR   1   177
## 24 2011   Mesh   0  1984
## 25 2012   MDNR   1   273
## 26 2012   Mesh   0  1984
## 27 2013   MDNR   1   249
## 28 2013   Mesh   0  1984
## 29 2014   MDNR   1   299
## 30 2014   Mesh   0  1984
## 31 2015   MDNR   1   218
## 32 2015   Mesh   0  1984
## 33 2016   MDNR   1    95
## 34 2016   Mesh   0  1984
## 35 2017   MDNR   1   169
## 36 2017   Mesh   0  1984
## 37 2018   MDNR   1   346
## 38 2018   Mesh   0  1984
#Reduce data to points inside the U.P. boundary
Mod.pnts$UP = is.na(over(Mod.pnts, UP.union.prj))
Mod.pnts = subset(Mod.pnts, UP == FALSE)

8 Construct Model

Select Time Steps and Knots (time)

This model uses an autoregressive term for temporal correlation.

k1 = length(levels(factor(Mod.pnts$Year))) #Total Years in the data

Mod.pnts$Step = as.integer(as.factor(Mod.pnts$Year)) #Integer version
range(Mod.pnts$Step)
## [1]  1 19
#Reduce temporal dimension
gtime1 = seq(1, k1, 2) #Group available years into 2-year knots
mesh.t1 = inla.mesh.1d(gtime1, degree = 1) #1D mesh over the time knots

locs1 = cbind(Mod.pnts$Longp, Mod.pnts$Latp) #All point locations (marten records + mesh nodes)

A.mart <- inla.spde.make.A(mesh = mesh,
                           loc = locs1,
                           group.mesh = mesh.t1, #Time knots
                           group = Mod.pnts$Step) #All data years
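
A quick dimension check (a sketch) confirms the projector matrix links the data to the space-time field: one row per data point, one column per combination of mesh node and time knot.

#dim(A.mart) #expected: nrow(Mod.pnts) rows and mesh$n * mesh.t1$n columns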

Spatial Prior and Index

FE.df = Mod.pnts@data

spde0 = inla.spde2.pcmatern(mesh, 
                            alpha = 2,
                            prior.range=c(500, 0.01),  
                            prior.sigma=c(1, 0.01),
                            constr = TRUE)
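
For reference, the PC-Matérn arguments read as tail probabilities:

#Prior interpretation (properties of inla.spde2.pcmatern):
#  prior.range = c(500, 0.01) -> P(spatial range < 500 km) = 0.01
#  prior.sigma = c(1, 0.01)   -> P(marginal SD > 1) = 0.01
#  (units are km because the projection uses +units=km)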

field1 = inla.spde.make.index("field1", 
                               spde0$n.spde,
                               n.group=mesh.t1$n) 

Organize Data

FE.df$Age2 = FE.df$Age
FE.df$Age2[is.na(FE.df$Age2)] = "Unknown"


FE.lst = list(c(field1, #Spatial index (created in step above)
                list(intercept1 = 1)), #intercept
                list(TMAX = FE.df[,"TMAX.sc"], #Max Temp covariate
                     PPT = FE.df[,"PPT.sc"], #Precip covariate
                     loc.sc = FE.df[,"loc.sc"], #Road variable 1
                     maj.sc = FE.df[,"maj.sc"], #Road variable 2
                     Age = FE.df[,"Age2"], 
                     vDist = FE.df[,"VDISTURB"], #Veg disturbance
                     CBD = FE.df[,"CBD"], #bulk density
                     EVC = FE.df[,"EVC"], #cover type
                     EVH = FE.df[,"EVH"],
                     EVT = FE.df[,"EVT"],
                     Year = FE.df[,"Year"],
                     Sex = FE.df[,"Sex"])) 

Stack1 = inla.stack(data = list(PA = FE.df$OBS), #PA = either a 1 (marten) or zero (mesh point/node)
                                  A = list(A.mart, 1), #created 2 steps above, location info
                            effects = FE.lst,
                                tag = "mart.0") #an arbitrary label for retrieving this stack

#Temporal Reduced
#save(list=c("Stack1", "spde0"), file="./HPCC/FMart_082419.RData") #New One 8/24/19

9 Non-Spatial Model

These models were previously run; load the results:

load("E:/HarvMod/HarvestModeling/HPCC/Results_082619.RData")
h.spec = list(theta=list(prior='pccor1', param=c(0, 0.9))) #PC prior on the AR1 correlation: P(cor > 0) = 0.9

YearPrior = list(prec = list(prior="pc.prec", param = c(1, 0.0001)))  

iid.prior = list(theta=list(prior = "normal", param=c(0, 5))) #Prior for the iid factor effects (LandFire, Year, Sex, Age)


Frm0 = PA ~ -1 + intercept1 + 
                  #f(field1, #Turn off space-time effect
                  #  model=spde, 
                  #  group = field1.group, 
                  #  control.group=list(model="ar1"), 
                  #  hyper=h.spec) +
                  f(vDist, 
                    model="iid",
                    hyper = iid.prior) +
                  f(CBD, 
                    model="iid",
                    hyper = iid.prior) +
                  f(EVC, 
                    model="iid",
                    hyper = iid.prior) +
                  f(EVH, 
                    model="iid",
                    hyper = iid.prior) +
                  f(EVT, 
                    model="iid",
                    hyper = iid.prior) +
                  f(Year, 
                    model="iid",
                    hyper = iid.prior) +
                  f(Sex, 
                    model="iid",
                    hyper = iid.prior) +
                  f(Age, 
                    model="iid",
                    hyper = iid.prior) +
                 TMAX + PPT + loc.sc + maj.sc 



Model0 = inla(Frm0, 
             data = inla.stack.data(Stack1, spde=spde0), 
             family = "binomial",
             verbose = TRUE,
             control.fixed = list(prec = 0.01, 
                                  prec.intercept = 0.001), 
             control.predictor = list(
                                    A = inla.stack.A(Stack1), 
                                    compute = TRUE, 
                                    link = 1), 
            control.inla = list(int.strategy = "eb"),
             control.results = list(return.marginals.random = TRUE,
                                    return.marginals.predictor = TRUE),
             control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE)) 
summary(Model0)
## 
## Call:
## c("inla(formula = Frm0, family = \"binomial\", data = inla.stack.data(Stack1, ",  "    spde = spde0), verbose = TRUE, control.compute = list(dic = TRUE, ",  "    cpo = TRUE, waic = TRUE), control.predictor = list(A = inla.stack.A(Stack1), ",  "    compute = TRUE, link = 1), control.inla = list(int.strategy = \"eb\"), ",  "    control.results = list(return.marginals.random = TRUE, return.marginals.predictor = TRUE), ",  "    control.fixed = list(prec = 0.01, prec.intercept = 0.001))" )
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          2.3389       1975.3925          1.9697       1979.7011 
## 
## Fixed effects:
##               mean     sd 0.025quant 0.5quant 0.975quant    mode kld
## intercept1 -2.1889 1.2260    -4.5974  -2.1885     0.2146 -2.1875   0
## TMAX        0.2018 0.2399    -0.2691   0.2017     0.6725  0.2017   0
## PPT        -0.0538 0.2421    -0.5292  -0.0538     0.4212 -0.0538   0
## loc.sc      0.1052 0.0212     0.0634   0.1053     0.1468  0.1054   0
## maj.sc     -0.0100 0.0213    -0.0520  -0.0099     0.0315 -0.0097   0
## 
## Random effects:
## Name   Model
##  vDist   IID model 
## CBD   IID model 
## EVC   IID model 
## EVH   IID model 
## EVT   IID model 
## Year   IID model 
## Sex   IID model 
## Age   IID model 
## 
## Model hyperparameters:
##                       mean     sd 0.025quant 0.5quant 0.975quant   mode
## Precision for vDist 1.2292 0.5543     0.4756   1.1222     2.6110 0.9341
## Precision for CBD   0.9813 0.4157     0.4118   0.9016     2.0127 0.7633
## Precision for EVC   1.2985 0.4792     0.6143   1.2141     2.4692 1.0646
## Precision for EVH   1.0346 0.4434     0.4288   0.9495     2.1334 0.8016
## Precision for EVT   1.6129 0.5131     0.8320   1.5378     2.8286 1.3984
## Precision for Year  1.0279 0.3146     0.5405   0.9848     1.7673 0.9039
## Precision for Sex   0.6813 0.3419     0.2543   0.6038     1.5582 0.4820
## Precision for Age   0.2374 0.0703     0.1302   0.2270     0.4033 0.2078
## 
## Expected number of effective parameters(std dev): 102.74(0.00)
## Number of equivalent replicates : 286.00 
## 
## Deviance Information Criterion (DIC) ...............: 11719.47
## Deviance Information Criterion (DIC, saturated) ....: 11719.47
## Effective number of parameters .....................: 100.24
## 
## Watanabe-Akaike information criterion (WAIC) ...: 11713.76
## Effective number of parameters .................: 90.93
## 
## Marginal log-Likelihood:  -6007.32 
## CPO and PIT are computed
## 
## Posterior marginals for linear predictor and fitted values computed
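
Because the likelihood is binomial with a logit link, the fixed effects are easier to read after exponentiating to the odds scale (a suggested follow-up, not in the original script):

#Odds ratios and 95% credible intervals for the fixed effects
exp(Model0$summary.fixed[, c("mean", "0.025quant", "0.975quant")])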

10 Spatio-temporal Model

#Same priors as in the non-spatial model above
h.spec = list(theta=list(prior='pccor1', param=c(0, 0.9)))

YearPrior = list(prec = list(prior="pc.prec", param = c(1, 0.0001)))  

iid.prior = list(theta=list(prior = "normal", param=c(0, 5)))


Frm1 = PA ~ -1 + intercept1 + 
                  f(field1, 
                    model=spde0, 
                    group = field1.group, 
                    control.group=list(model="ar1"), 
                    hyper=h.spec) +
                  f(vDist, 
                    model="iid",
                    hyper = iid.prior) +
                  f(CBD, 
                    model="iid",
                    hyper = iid.prior) +
                  f(EVC, 
                    model="iid",
                    hyper = iid.prior) +
                  f(EVH, 
                    model="iid",
                    hyper = iid.prior) +
                  f(EVT, 
                    model="iid",
                    hyper = iid.prior) +
                  f(Year, 
                    model="iid",
                    hyper = iid.prior) +
                  f(Age, 
                    model="iid",
                    hyper = iid.prior) +
                  f(Sex, 
                    model="iid",
                    hyper = iid.prior) +
                 TMAX + PPT + loc.sc + maj.sc 

#Starting values for the hyperparameters, saved from a previous run to warm-start the optimization:
#theta1 = Model1$internal.summary.hyperpar$mean
theta1 = c(4.556413, 0.969241, 5.673482, 0.116404, -0.383791, 0.367687, -0.260188, 0.592449, -0.019105, -1.560902, -0.425632)

Model1 = inla(Frm1, 
             data = inla.stack.data(Stack1), 
             family = "binomial",
             verbose = TRUE,
             control.fixed = list(prec = 0.01, 
                                  prec.intercept = 0.001), 
             control.predictor = list(
                                    A = inla.stack.A(Stack1), 
                                    compute = TRUE, 
                                    link = 1), 
             control.mode = list(restart = TRUE, theta = theta1),
             control.inla = list(strategy = "gaussian", int.strategy = "eb"),
             control.results = list(return.marginals.random = TRUE,
                                    return.marginals.predictor = TRUE),
             control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE)) 
summary(Model1)
## 
## Call:
## c("inla(formula = Frm1, family = \"binomial\", data = inla.stack.data(Stack1), ",  "    verbose = TRUE, control.compute = list(dic = TRUE, cpo = TRUE, ",  "        waic = TRUE), control.predictor = list(A = inla.stack.A(Stack1), ",  "        compute = TRUE, link = 1), control.inla = list(strategy = \"gaussian\", ",  "        int.strategy = \"eb\"), control.results = list(return.marginals.random = TRUE, ",  "        return.marginals.predictor = TRUE), control.fixed = list(prec = 0.01, ",  "        prec.intercept = 0.001), num.threads = 8)")
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          2.2321      56054.0006          4.4843      56060.7170 
## 
## Fixed effects:
##               mean     sd 0.025quant 0.5quant 0.975quant    mode kld
## intercept1 -4.5470 1.3050    -7.1092  -4.5471    -1.9870 -4.5470   0
## TMAX        0.1778 0.2459    -0.3051   0.1778     0.6602  0.1778   0
## PPT        -0.0027 0.2529    -0.4992  -0.0027     0.4934 -0.0027   0
## loc.sc      0.2031 0.0296     0.1450   0.2031     0.2612  0.2031   0
## maj.sc      0.0284 0.0286    -0.0277   0.0284     0.0845  0.0284   0
## 
## Random effects:
## Name   Model
##  field1   SPDE2 model 
## vDist   IID model 
## CBD   IID model 
## EVC   IID model 
## EVH   IID model 
## EVT   IID model 
## Year   IID model 
## Age   IID model 
## Sex   IID model 
## 
## Model hyperparameters:
##                        mean      sd 0.025quant 0.5quant 0.975quant    mode
## Range for field1    93.2774 13.8092    69.3530  92.1533   123.4641 89.8641
## Stdev for field1     2.7089  0.3534     2.0850   2.6846     3.4717  2.6357
## GroupRho for field1  0.9829  0.0063     0.9676   0.9840     0.9921  0.9860
## Precision for vDist  1.2034  0.5161     0.4802   1.1109     2.4707  0.9416
## Precision for CBD    0.8161  0.3468     0.3361   0.7516     1.6748  0.6373
## Precision for EVC    1.6720  0.6580     0.7416   1.5548     3.2851  1.3462
## Precision for EVH    0.8816  0.3664     0.3696   0.8147     1.7816  0.6955
## Precision for EVT    1.9236  0.6945     0.8814   1.8209     3.5714  1.6243
## Precision for Year   1.0191  0.3285     0.5315   0.9665     1.8083  0.8706
## Precision for Age    0.2104  0.0593     0.1144   0.2039     0.3459  0.1913
## Precision for Sex    0.7599  0.3886     0.2801   0.6704     1.7601  0.5316
## 
## Expected number of effective parameters(std dev): 471.33(0.00)
## Number of equivalent replicates : 62.34 
## 
## Deviance Information Criterion (DIC) ...............: 9486.29
## Deviance Information Criterion (DIC, saturated) ....: NULL
## Effective number of parameters .....................: 490.26
## 
## Watanabe-Akaike information criterion (WAIC) ...: 9413.73
## Effective number of parameters .................: 396.72
## 
## Marginal log-Likelihood:  -5043.66 
## CPO and PIT are computed
## 
## Posterior marginals for linear predictor and fitted values computed
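
With both models fit, DIC and WAIC can be compared directly (a suggested check; the values below are taken from the two summaries above):

c(DIC0 = Model0$dic$dic, DIC1 = Model1$dic$dic)       #11719.47 vs 9486.29
c(WAIC0 = Model0$waic$waic, WAIC1 = Model1$waic$waic) #11713.76 vs 9413.73

The spatio-temporal model improves both criteria substantially.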

11 Results

11.1 Age Effect

mic.df = as.data.frame(Model1$summary.random$Age)
names(mic.df)[1:6] = c("ID", "Mean", "sd", "Q025", "Q50", "Q975") #Rename only the summary columns

ggplot(mic.df, aes(x=factor(ID), y=Mean)) + 
                geom_point(size=2, pch=19, col = "red") +
                geom_linerange(aes(ymin=Q025, ymax=Q975), colour="black") +
                geom_hline(yintercept = 0, 
                               linetype = "dotted",
                               colour = "red",
                               size = 1) +
                theme_classic() +
                           xlab("Age Effect") +
                           ylab("Effect (logit)") +
                           theme(axis.title.y = element_text(face="bold", size=16),
                                 axis.title.x = element_text(face="bold", size=14),
                                 axis.text.y = element_text(face="bold", size=16),
                                 axis.text.x = element_text(face="bold", size=10, 
                                              vjust=1, hjust=1, angle=45),
                                 strip.text = element_text(face="bold", size = 20))
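
Sections 11.2 through 11.8 repeat this plotting code for each random effect. A small helper (a refactoring sketch, not in the original script; plot_re is a hypothetical name) would make the repetition explicit:

plot_re = function(model, term, x.lab){
  #Caterpillar plot of posterior means and 95% intervals for one random effect
  re.df = as.data.frame(model$summary.random[[term]])
  names(re.df)[1:6] = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")
  ggplot(re.df, aes(x = factor(ID), y = Mean)) + 
    geom_point(size = 2, pch = 19, col = "red") +
    geom_linerange(aes(ymin = Q025, ymax = Q975), colour = "black") +
    geom_hline(yintercept = 0, linetype = "dotted", colour = "red", size = 1) +
    theme_classic() +
    xlab(x.lab) + 
    ylab("Effect (logit)")
}

#e.g., plot_re(Model1, "Year", "Year Effect")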

11.2 Sex Effect

mic.df = as.data.frame(Model1$summary.random$Sex)[2:3,] #Drop the first level; keep the two sex classes
names(mic.df)[1:6] = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")

ggplot(mic.df, aes(x=ID, y=Mean)) + 
                geom_point(size=2, pch=19, col = "red") +
                geom_linerange(aes(ymin=Q025, ymax=Q975), colour="black") +
                geom_hline(yintercept = 0, 
                               linetype = "dotted",
                               colour = "red",
                               size = 1) +
                theme_classic() +
                           xlab("Sex Effect") +
                           ylab("Effect (logit)") +
                           theme(axis.title.y = element_text(face="bold", size=16),
                                 axis.title.x = element_text(face="bold", size=14),
                                 axis.text.y = element_text(face="bold", size=16),
                                 axis.text.x = element_text(face="bold", size=10, 
                                              vjust=1, hjust=1, angle=45),
                                 strip.text = element_text(face="bold", size = 20))

11.3 Year Effect

mic.df = as.data.frame(Model1$summary.random$Year)
names(mic.df)[1:6] = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")

ggplot(mic.df, aes(x=factor(ID), y=Mean)) + 
                geom_point(size=2, pch=19, col = "red") +
                geom_linerange(aes(ymin=Q025, ymax=Q975), colour="black") +
                geom_hline(yintercept = 0, 
                               linetype = "dotted",
                               colour = "red",
                               size = 1) +
                theme_classic() +
                           xlab("Year Effect") +
                           ylab("Effect (logit)") +
                           theme(axis.title.y = element_text(face="bold", size=16),
                                 axis.title.x = element_text(face="bold", size=14),
                                 axis.text.y = element_text(face="bold", size=16),
                                 axis.text.x = element_text(face="bold", size=10, 
                                              vjust=1, hjust=1, angle=45),
                                 strip.text = element_text(face="bold", size = 20))

11.4 VDIST (Vegetation Disturbance)

mic.df = as.data.frame(Model1$summary.random$vDist)
names(mic.df)[1:6] = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")

ggplot(mic.df, aes(x=factor(ID), y=Mean)) + 
                geom_point(size=2, pch=19, col = "red") +
                geom_linerange(aes(ymin=Q025, ymax=Q975), colour="black") +
                geom_hline(yintercept = 0, 
                               linetype = "dotted",
                               colour = "red",
                               size = 1) +
                theme_classic() +
                           xlab("VDIST Effect") +
                           ylab("Effect (logit)") +
                           theme(axis.title.y = element_text(face="bold", size=16),
                                 axis.title.x = element_text(face="bold", size=14),
                                 axis.text.y = element_text(face="bold", size=16),
                                 axis.text.x = element_text(face="bold", size=10, 
                                              vjust=1, hjust=1, angle=45),
                                 strip.text = element_text(face="bold", size = 20))

11.5 CBD (Canopy Bulk Density)

mic.df = as.data.frame(Model1$summary.random$CBD)
names(mic.df)[1:6] = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")

ggplot(mic.df, aes(x=factor(ID), y=Mean)) + 
                geom_point(size=2, pch=19, col = "red") +
                geom_linerange(aes(ymin=Q025, ymax=Q975), colour="black") +
                geom_hline(yintercept = 0, 
                               linetype = "dotted",
                               colour = "red",
                               size = 1) +
                theme_classic() +
                           xlab("CBD Effect") +
                           ylab("Effect (logit)") +
                           theme(axis.title.y = element_text(face="bold", size=16),
                                 axis.title.x = element_text(face="bold", size=14),
                                 axis.text.y = element_text(face="bold", size=16),
                                 axis.text.x = element_text(face="bold", size=10, 
                                              vjust=1, hjust=1, angle=45),
                                 strip.text = element_text(face="bold", size = 20))

11.6 EVC (Existing Vegetation Cover)

mic.df = as.data.frame(Model1$summary.random$EVC)
names(mic.df)[1:6] = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")

ggplot(mic.df, aes(x=factor(ID), y=Mean)) + 
                geom_point(size=2, pch=19, col = "red") +
                geom_linerange(aes(ymin=Q025, ymax=Q975), colour="black") +
                geom_hline(yintercept = 0, 
                               linetype = "dotted",
                               colour = "red",
                               size = 1) +
                theme_classic() +
                           xlab("EVC Effect") +
                           ylab("Effect (logit)") +
                           theme(axis.title.y = element_text(face="bold", size=16),
                                 axis.title.x = element_text(face="bold", size=14),
                                 axis.text.y = element_text(face="bold", size=16),
                                 axis.text.x = element_text(face="bold", size=10, 
                                              vjust=1, hjust=1, angle=45),
                                 strip.text = element_text(face="bold", size = 20))

11.7 EVH (Existing Vegetation Height)

mic.df = as.data.frame(Model1$summary.random$EVH)
names(mic.df)[1:6] = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")

ggplot(mic.df, aes(x=factor(ID), y=Mean)) + 
                geom_point(size=2, pch=19, col = "red") +
                geom_linerange(aes(ymin=Q025, ymax=Q975), colour="black") +
                geom_hline(yintercept = 0, 
                               linetype = "dotted",
                               colour = "red",
                               size = 1) +
                theme_classic() +
                           xlab("EVH Effect") +
                           ylab("Effect (logit)") +
                           theme(axis.title.y = element_text(face="bold", size=16),
                                 axis.title.x = element_text(face="bold", size=14),
                                 axis.text.y = element_text(face="bold", size=16),
                                 axis.text.x = element_text(face="bold", size=10, 
                                              vjust=1, hjust=1, angle=45),
                                 strip.text = element_text(face="bold", size = 20))

11.8 EVT (Existing Vegetation Type)

mic.df = as.data.frame(Model1$summary.random$EVT)
names(mic.df)[1:6] = c("ID", "Mean", "sd", "Q025", "Q50", "Q975")

ggplot(mic.df, aes(x=factor(ID), y=Mean)) + 
                geom_point(size=2, pch=19, col = "red") +
                geom_linerange(aes(ymin=Q025, ymax=Q975), colour="black") +
                geom_hline(yintercept = 0, 
                               linetype = "dotted",
                               colour = "red",
                               size = 1) +
                theme_classic() +
                           xlab("EVT Effect") +
                           ylab("Effect (logit)") +
                           theme(axis.title.y = element_text(face="bold", size=16),
                                 axis.title.x = element_text(face="bold", size=14),
                                 axis.text.y = element_text(face="bold", size=16),
                                 axis.text.x = element_text(face="bold", size=10, 
                                              vjust=1, hjust=1, angle=45),
                                 strip.text = element_text(face="bold", size = 20))

11.9 View Random Fields

F.s1 = cbind(Model1$summary.random$field1$mean, #Posterior mean at each space-time knot
             field1$field1.group)               #Time-knot membership of each value

s1xmean = split(F.s1[,1], F.s1[,2]) #List of spatial fields, one element per time knot


Pred.pnts = spTransform(Grd.pnt, nProj) #Project the prediction grid to match the mesh

Grd_A = inla.spde.make.A(mesh, loc = Pred.pnts@coords) #Spatial-only projector for the grid
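
Each element of s1xmean has length mesh$n, so this spatial-only projector conforms with it (a quick check, suggested here rather than in the original):

dim(Grd_A) #Rows = prediction points, columns = mesh$n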


for(i in 1:10){
  
  #Project the i-th knot's posterior mean field onto the prediction grid
  Pred.pnts$m1RE = drop(Grd_A %*% s1xmean[[i]])
  
  #Rasterize onto the UP domain raster
  tmp.r = rasterize(spTransform(Pred.pnts, proj4string(UP)), 
                    Domain.r, 
                    "m1RE", 
                    background = NA)
  
  if(i == 1){M0.stk = tmp.r} else {M0.stk = stack(M0.stk, tmp.r)}
  
}

names(M0.stk) = paste("Knot", 1:10, sep="_")
range(M0.stk)
## class       : RasterBrick 
## dimensions  : 317, 698, 221266, 2  (nrow, ncol, ncell, nlayers)
## resolution  : 0.01, 0.01  (x, y)
## extent      : -90.41829, -83.43829, 45.09269, 48.26269  (xmin, xmax, ymin, ymax)
## coord. ref. : +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0 
## data source : in memory
## names       : range_min, range_max 
## min values  : -4.209383, -3.752171 
## max values  :  4.449773,  5.016270
rng = seq(-4.25, 5.21, 0.01) #Breaks spanning the layer min/max shown above

mCols = c("#A50026", "#D73027", "#F46D43", "#FDAE61", "#E0F3F8", "#ABD9E9", "#74ADD1", "#4575B4", "#313695") 
#brewer.pal(11, "RdYlBu") with the two central yellows dropped

cr = colorRampPalette(c(rev(mCols)),  
                        bias = 1.4, space = "rgb")

RF0osi = levelplot(M0.stk, 
               margin = FALSE,
               layout = c(2, 5), #2 x 5 panels, one per time knot
               xlab = " ", ylab = NULL, 
               col.regions = cr, at = rng,
               colorkey = list(labels = list(cex = 1.5),
                               space = "right"),
               par.settings = list(axis.line = list(col = "black"), 
                                   strip.background = list(col = 'transparent'), 
                                   strip.border = list(col = 'transparent')),
               scales = list(cex = 1.5)) + 
        latticeExtra::layer(sp.polygons(UP, col = "black", lwd = 1)) 

RF0osi