This code illustrates models and analyses developed to better understand spatial and temporal trends in the risk of avian influenza transmission between wild waterfowl and domestic poultry. The study is in its early stages, and results should be considered preliminary and exploratory only.
CITATIONS REFERENCED IN THIS SCRIPT
Story Board: Avian Influenza in the United States:
https://usgs.maps.arcgis.com/apps/MapJournal/index.html?appid=37f0eacdaccf4c2fbe5f65535ddc7e8c
Prosser, D. J., Hungerford, L. L., Erwin, R. M., Ottinger, M. A., Takekawa, J. Y., & Ellis, E. C. (2013).
Mapping Avian Influenza Transmission Risk at the Interface of Domestic Poultry and Wild Birds.
Frontiers in Public Health, 1(August), 1–11. https://doi.org/10.3389/fpubh.2013.00028
Humphreys, J. M., Murrow, J. L., Sullivan, J. D., & Prosser, D. J. (2019). Seasonal occurrence and abundance of dabbling ducks across the continental United States: Joint spatio-temporal modeling for the genus Anas. Diversity and Distributions. https://onlinelibrary.wiley.com/doi/full/10.1111/ddi.12960
Humphreys, J.M., Murrow, J.L., Sullivan, J.D., and Prosser, D.J., 2019, Spatio-temporal distribution models for dabbling duck species across the continental United States: U.S. Geological Survey data release, https://doi.org/10.5066/P9DDR3OB.
Patyk, K. A., McCool-Eye, M. J., South, D. D., Burdett, C. L., Maroney, S. A., Fox, A., Kuiper, G., & Magzamen, S. (2020). Modelling the domestic poultry population in the United States: A novel approach leveraging remote sensing and synthetic data methods. Geospatial Health, 15(2). https://doi.org/10.4081/gh.2020.913
Define the domain boundaries for the study area.
= "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
LL84
= readOGR(dsn = "D:/County_WNV/Arc/Counties",
Counties layer = "Counties4",
stringsAsFactors = FALSE)
## OGR data source with driver: ESRI Shapefile
## Source: "D:\County_WNV\Arc\Counties", layer: "Counties4"
## with 3109 features
## It has 73 fields
## Integer64 fields read as strings: OBJECTID Pop2010 Pop_18_24 HS18_24tot BD18_24tot Pop_25up HS25up_tot BD25up_tot Pop_18up HS18up_tot BD18up_tot
#States
LL84 = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"

States = map("state",
             fill = TRUE,
             plot = FALSE)

IDs = sapply(strsplit(States$names, ":"),
             function(x) x[1])

States = map2SpatialPolygons(States, IDs = IDs,
                             proj4string = CRS(LL84))

pid = sapply(slot(States, "polygons"),
             function(x) slot(x, "ID"))

p.df = data.frame(ID = 1:length(States),
                  row.names = pid)

States = SpatialPolygonsDataFrame(States, p.df)

States = spTransform(States, CRS(proj4string(Counties)))
Cmap_df = fortify(Counties)

## Regions defined for each Polygons

cStates = fortify(States)

## Regions defined for each Polygons
ggplot(Cmap_df,
aes(long,lat, group=group)) +
geom_polygon(col="gray70", fill="gray90", size = 0.05) +
geom_polygon(data = cStates,
aes(long,lat, group=group),
col="gray50", fill = "transparent", size = 0.15) +
xlab(" ") +
ylab(" ") +
coord_equal() +
ggtitle("Study Area") +
theme(panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.background = element_blank(),
plot.background = element_blank(),
panel.border = element_blank(),
strip.text = element_text(size=15, face="bold"),
strip.background = element_blank(),
legend.key.size = unit(1,"line"),
legend.key.width = unit(3,"line"),
legend.text = element_text(size=13, face="bold"),
legend.title = element_text(size=18, face="bold"),
legend.position = "bottom",
axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
plot.title = element_text(size=25, face="bold", hjust = 0.5))
Wild bird occurrence and abundance were originally modeled by season (spring, breeding, fall, winter). To ultimately produce weekly risk estimates, the seasonal abundance estimates need to be disaggregated to weekly time steps. To accomplish this, the wild bird observation data originally used in model fitting (from eBird) are used to estimate the weekly probability of birds being at a particular latitude.
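The weekly weighting used below follows a simple idea: for each week, the latitudes of the eBird observations give a mean and standard deviation; each raster cell's latitude is converted to a z-score against that distribution; and the upper-tail normal probability is used to weight the seasonal abundance surface. A minimal, self-contained sketch with made-up values (the full per-species loops appear in the sections that follow):

#Minimal illustration of the latitude-weighting step (hypothetical values)
obs.lat  = c(30.2, 31.5, 29.8, 33.1, 30.9)       #eBird observation latitudes for one week (made up)
lat.mean = mean(obs.lat)                          #weekly mean latitude
lat.sd   = sd(obs.lat)*sqrt((length(obs.lat)-1)/length(obs.lat))  #population SD, as in the loops below

cell.lat = 35.0                                   #latitude of one raster cell (made up)
Zs       = (cell.lat - lat.mean)/lat.sd           #z-score of the cell latitude
Prob     = pnorm(Zs, lower.tail = FALSE)          #upper-tail probability

weekly.abun = Prob * 12.4                         #weight a (made up) seasonal abundance of 12.4 birds/km^2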
Bird abundance estimates.
Seasonal files are available as part of a USGS data release:
Humphreys, J.M., Murrow, J.L., Sullivan, J.D., and Prosser, D.J., 2019, Spatio-temporal distribution models for dabbling duck species across the continental United States: U.S. Geological Survey data release, https://doi.org/10.5066/P9DDR3OB.
Copies have also been saved locally.
Create a raster stack from seasonal wild bird abundance estimates:
= list.files(path="D:/Dabbler_Pre/Dabbler_project/Raster_outputs",
US.abun pattern = "abun_", full.names = TRUE) #file names
= substr(US.abun, 52, 55)
Spp.lst unique(Spp.lst)
## [1] "ambd" "amew" "buwt" "cint" "gadw" "gnwt" "mall" "motd" "norp" "nors"
## [11] "wood"
= substr(US.abun, 57, 60)
Seas.lst unique(Seas.lst)
## [1] "Bree" "Fall" "Spri" "Wint"
= as.data.frame(
Call.df cbind(
Path = US.abun,
Spp = Spp.lst,
Seas = Seas.lst
)
)
= raster::stack(US.abun) #Stack rasters Abund.stk
Load eBird observations for all species:
= list.files(path="D:/Dabbler_Pre/Dabbler_project/eBird_data/All_Observations",
eBird.call full.names = TRUE)
= as.data.frame(
eCall.df cbind(
Path = eBird.call,
Spp = substr(eBird.call, 64, 67)))
Create Weekly rasters for WINTER
Winter includes Weeks 50-53 and 1-9.
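For reference, the week filters applied in the four seasonal loops below are:

#Week filters used in the seasonal loops below
#  Winter:   Week >= 50 | Week < 10   (weeks 50-53 and 1-9)
#  Spring:   Week >= 10 & Week < 20   (weeks 10-19)
#  Breeding: Week >= 20 & Week < 36   (weeks 20-35)
#  Fall:     Week >= 36 & Week < 50   (weeks 36-49)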
CleanBird() is a custom function to clean eBird data. It removes unneeded columns, checks coordinates, etc.
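CleanBird() itself is not included in this script. For orientation only, a hypothetical sketch of a cleaner with a similar purpose is shown below; the column names, delimiter, and return behavior are assumptions (the actual CleanBird() appears to write its result to a named object, here eTxt, rather than returning it).

#Hypothetical sketch of a CleanBird()-style helper (not the actual function used)
#Assumes a tab-delimited eBird extract with LATITUDE, LONGITUDE, and OBSERVATION.DATE columns
CleanBird.sketch = function(path, min.year, max.year){
  obs = read.delim(path, stringsAsFactors = FALSE)
  obs = obs[, c("LATITUDE", "LONGITUDE", "OBSERVATION.DATE")]   #drop unneeded columns
  names(obs) = c("Lat", "Long", "Date")
  obs$Date = as.Date(obs$Date)
  obs = obs[!is.na(obs$Lat) & !is.na(obs$Long), ]               #basic coordinate checks
  obs = obs[abs(obs$Lat) <= 90 & abs(obs$Long) <= 180, ]
  yr = as.integer(format(obs$Date, "%Y"))
  obs[yr >= min.year & yr <= max.year, ]                        #restrict to study years
}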
Spp.vect = levels(as.factor(Call.df$Spp))

for(i in 1:length(Spp.vect)){

  loop.spp = Spp.vect[i]

  sppCall = Call.df %>% filter(Spp == loop.spp & Seas == "Wint")

  Wint.rast = raster(paste(sppCall$Path))

  Spp.df = eCall.df %>% filter(Spp == loop.spp)

  CleanBird(paste(Spp.df$Path), "eTxt", 2016, 2017)

  eTxt$Week = week(eTxt$Date)
  eTxt = eTxt %>% filter(Week >= 50 | Week < 10)

  head(eTxt)

  Week.lvls = levels(factor(eTxt$Week))

  NORS.grd = rasterToPoints(Wint.rast, sp = TRUE)

  NORS.grd@data = NORS.grd@data %>%
                  mutate(Long = NORS.grd@coords[,1],
                         Lat = NORS.grd@coords[,2])

  for(j in 1:length(Week.lvls)){

    weekly.obs = eTxt %>% filter(Week == Week.lvls[j])

    lat.mean = mean(weekly.obs$Lat)
    lat.mean

    lat.sd = sd(weekly.obs$Lat)*sqrt((length(weekly.obs$Lat)-1)/(length(weekly.obs$Lat)))

    NORS.grd$Zs = (NORS.grd$Lat - lat.mean)/lat.sd

    NORS.grd$Prob = pnorm(NORS.grd$Zs, lower.tail = FALSE)

    tmp.r = rasterize(NORS.grd,
                      Wint.rast, "Prob",
                      background = NA)

    tmp2.r = tmp.r*Wint.rast

    Label = paste(loop.spp, Week.lvls[j], sep="_")

    writeRaster(tmp2.r, paste("D:/Interface_USDA/National/", Label,".tif", sep=""))
  }
}
Create Weekly rasters for SPRING
Spp.vect = levels(as.factor(Call.df$Spp))

for(i in 1:length(Spp.vect)){

  loop.spp = Spp.vect[i]

  sppCall = Call.df %>% filter(Spp == loop.spp & Seas == "Spri")

  Wint.rast = raster(paste(sppCall$Path))

  Spp.df = eCall.df %>% filter(Spp == loop.spp)

  CleanBird(paste(Spp.df$Path), "eTxt", 2016, 2017)

  eTxt$Week = week(eTxt$Date)
  eTxt = eTxt %>% filter(Week >= 10 & Week < 20)

  head(eTxt)

  Week.lvls = levels(factor(eTxt$Week))

  NORS.grd = rasterToPoints(Wint.rast, sp = TRUE)

  NORS.grd@data = NORS.grd@data %>%
                  mutate(Long = NORS.grd@coords[,1],
                         Lat = NORS.grd@coords[,2])

  for(j in 1:length(Week.lvls)){

    weekly.obs = eTxt %>% filter(Week == Week.lvls[j])

    lat.mean = mean(weekly.obs$Lat)
    lat.mean

    lat.sd = sd(weekly.obs$Lat)*sqrt((length(weekly.obs$Lat)-1)/(length(weekly.obs$Lat)))

    NORS.grd$Zs = (NORS.grd$Lat - lat.mean)/lat.sd

    NORS.grd$Prob = pnorm(NORS.grd$Zs, lower.tail = FALSE)

    tmp.r = rasterize(NORS.grd,
                      Wint.rast, "Prob",
                      background = NA)

    tmp2.r = tmp.r*Wint.rast

    Label = paste(loop.spp, Week.lvls[j], sep="_")

    writeRaster(tmp2.r, paste("D:/Interface_USDA/National/", Label,".tif", sep=""), overwrite=TRUE)
  }
}
Create Weekly rasters for BREEDING season
Spp.vect = levels(as.factor(Call.df$Spp))

for(i in 1:length(Spp.vect)){

  loop.spp = Spp.vect[i]

  sppCall = Call.df %>% filter(Spp == loop.spp & Seas == "Bree")

  Wint.rast = raster(paste(sppCall$Path))

  Spp.df = eCall.df %>% filter(Spp == loop.spp)

  CleanBird(paste(Spp.df$Path), "eTxt", 2016, 2017)

  eTxt$Week = week(eTxt$Date)
  eTxt = eTxt %>% filter(Week >= 20 & Week < 36)

  head(eTxt)

  Week.lvls = levels(factor(eTxt$Week))

  NORS.grd = rasterToPoints(Wint.rast, sp = TRUE)

  NORS.grd@data = NORS.grd@data %>%
                  mutate(Long = NORS.grd@coords[,1],
                         Lat = NORS.grd@coords[,2])

  for(j in 1:length(Week.lvls)){

    weekly.obs = eTxt %>% filter(Week == Week.lvls[j])

    lat.mean = mean(weekly.obs$Lat)
    lat.mean

    lat.sd = sd(weekly.obs$Lat)*sqrt((length(weekly.obs$Lat)-1)/(length(weekly.obs$Lat)))

    NORS.grd$Zs = (NORS.grd$Lat - lat.mean)/lat.sd

    NORS.grd$Prob = pnorm(NORS.grd$Zs, lower.tail = FALSE)

    tmp.r = rasterize(NORS.grd,
                      Wint.rast, "Prob",
                      background = NA)

    tmp2.r = tmp.r*Wint.rast

    Label = paste(loop.spp, Week.lvls[j], sep="_")

    writeRaster(tmp2.r, paste("D:/Interface_USDA/National/", Label,".tif", sep=""), overwrite=TRUE)
  }
}
Create Weekly rasters for FALL
Spp.vect = levels(as.factor(Call.df$Spp))

for(i in 1:length(Spp.vect)){

  loop.spp = Spp.vect[i]

  sppCall = Call.df %>% filter(Spp == loop.spp & Seas == "Fall") #Fall-season rasters

  Wint.rast = raster(paste(sppCall$Path))

  Spp.df = eCall.df %>% filter(Spp == loop.spp)

  CleanBird(paste(Spp.df$Path), "eTxt", 2016, 2017)

  eTxt$Week = week(eTxt$Date)
  eTxt = eTxt %>% filter(Week >= 36 & Week < 50)

  head(eTxt)

  Week.lvls = levels(factor(eTxt$Week))

  NORS.grd = rasterToPoints(Wint.rast, sp = TRUE)

  NORS.grd@data = NORS.grd@data %>%
                  mutate(Long = NORS.grd@coords[,1],
                         Lat = NORS.grd@coords[,2])

  for(j in 1:length(Week.lvls)){

    weekly.obs = eTxt %>% filter(Week == Week.lvls[j])

    lat.mean = mean(weekly.obs$Lat)
    lat.mean

    lat.sd = sd(weekly.obs$Lat)*sqrt((length(weekly.obs$Lat)-1)/(length(weekly.obs$Lat)))

    NORS.grd$Zs = (NORS.grd$Lat - lat.mean)/lat.sd

    NORS.grd$Prob = pnorm(NORS.grd$Zs, lower.tail = FALSE)

    tmp.r = rasterize(NORS.grd,
                      Wint.rast, "Prob",
                      background = NA)

    tmp2.r = tmp.r*Wint.rast

    Label = paste(loop.spp, Week.lvls[j], sep="_")

    writeRaster(tmp2.r, paste("D:/Interface_USDA/National/", Label,".tif", sep=""), overwrite=TRUE)
  }
}
Collate weekly rasters to estimate total abundance across all species.
= list.files(path="D:/Interface_USDA/National",
Comb.abun full.names = TRUE)
= sub('.*_', "", Comb.abun)
Week.lst = gsub(".tif", "", Week.lst)
Week.lst
= as.data.frame(
Week.df cbind(
Path = Comb.abun,
Week = Week.lst
)
)
$WeekI = as.integer(as.character(Week.df$Week))
Week.df
= unique(Week.lst) myWeeks
for(i in 1:length(myWeeks)){

  Week.set = Week.df %>% filter(Week == myWeeks[i])

  Weeks.r = stack(paste(Week.set$Path))

  Weeks.r = sum(Weeks.r)

  writeRaster(Weeks.r, paste("D:/Interface_USDA/Wild_sums/WS_Week_", myWeeks[i],".tif", sep=""), overwrite=TRUE)
}
Stack total abundance rasters created above.
= list.files(path="D:/Interface_USDA/Wild_sums",
Weekly_Abun full.names = TRUE)
= stack(Weekly_Abun)
Weekly_Abun dim(Weekly_Abun)
## [1] 970 2306 53
names(Weekly_Abun) = paste("Week", myWeeks, sep="_")
Check Result
Plot total abundance for Weeks 20 (SPRING) and 52 (WINTER)
= Weekly_Abun[["Week_20"]]
Check.Wk20.r = Weekly_Abun[["Week_52"]]
Check.Wk52.r
= stack(Check.Wk20.r, Check.Wk52.r)
Examples.r
= projectRaster(Examples.r, crs = CRS(proj4string(States))) Examples.r
Examples.r
## class : RasterBrick
## dimensions : 2178, 5288, 11517264, 2 (nrow, ncol, ncell, nlayers)
## resolution : 1100, 1400 (x, y)
## extent : -2895333, 2921467, -1389061, 1660139 (xmin, xmax, ymin, ymax)
## crs : +proj=aea +lat_0=37.5 +lon_0=-96 +lat_1=29.5 +lat_2=45.5 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs
## source : memory
## names : Week_20, Week_52
## min values : 0, 0
## max values : 293.43400, 64.54171
breaks = seq(0, 300, 0.1)

mCols = brewer.pal(9, "YlOrRd")
cr = colorRampPalette(mCols)(length(breaks)-1)
cr = colorRampPalette(cr, bias = 3, space = "rgb")
M0 = levelplot(Examples.r,
               margin = FALSE,
               layout = c(2, 1),
               xlab = " ",
               ylab = NULL,
               #sub = " ",
               main = list("Total Abundance (all species)", cex = 2, hjust = 0.5, col="gray40"),
               #xlab.top = "C ", cex=20,
               names.attr = c("Week 20", "Week 52"),
               maxpixels = 1e5,
               col.regions = cr, at = breaks,
               colorkey = list(labels = list(at = c(seq(0, 300, 50)),
                                             labels = c(seq(0, 300, 50)), cex = 1.5),
                               labels = list(cex = 12),
                               space = "bottom"),
               par.strip.text = list(fontface = 'bold', cex = 2),
               par.settings = list(axis.line = list(col = "black"),
                                   strip.background = list(col = 'transparent'),
                                   strip.border = list(col = 'transparent')),
               scales = list(cex = 1.5)) +
     latticeExtra::layer(sp.polygons(States, col = "darkgray", lwd = 0.5))

M0
Poultry data citation:
Patyk, K. A., McCool-Eye, M. J., South, D. D., Burdett, C. L., Maroney, S. A., Fox, A., Kuiper, G., & Magzamen, S. (2020). Modelling the domestic poultry population in the United States: A novel approach leveraging remote sensing and synthetic data methods. Geospatial Health, 15(2). https://doi.org/10.4081/gh.2020.913
Load data, convert to spatial points
nProj = proj4string(States)

Broilers.HM = read.csv("C:/Users/humph173/Documents/LabWork/Patuxent/HybridModel/broilers_hybrid_FINAL.csv",
                       stringsAsFactors = FALSE) %>%
              select(POINT_X, POINT_Y, population, commodityType, category)

BYPullets.HM = read.csv("C:/Users/humph173/Documents/LabWork/Patuxent/HybridModel/layerpullets_hybrid_FINAL.csv",
                        stringsAsFactors = FALSE) %>%
               mutate(POINT_X = longitude,
                      POINT_Y = latitude,
                      category = Category) %>%
               select(POINT_X, POINT_Y, population, commodityType, category)

Layers.HM = read.csv("C:/Users/humph173/Documents/LabWork/Patuxent/HybridModel/layers_hybrid_FINAL.csv",
                     stringsAsFactors = FALSE) %>%
            select(POINT_X, POINT_Y, population, commodityType, category)

Turkey.HM = read.csv("C:/Users/humph173/Documents/LabWork/Patuxent/HybridModel/turkeys_hybrid_FINAL.csv",
                     stringsAsFactors = FALSE) %>%
            select(POINT_X, POINT_Y, population, commodityType, category)

Hybrid.df = rbind(Broilers.HM, BYPullets.HM, Layers.HM, Turkey.HM)

Hybrid.pnt = SpatialPointsDataFrame(Hybrid.df[,c("POINT_X", "POINT_Y")], Hybrid.df)
proj4string(Hybrid.pnt) = LL84

Hybrid.pntp = spTransform(Hybrid.pnt, nProj)
Calculate distance to nearest neighbor by commodity type.
unique(Hybrid.pntp$commodityType)
## [1] "broilers" "layerpullets" "layers" "turkeys"
myBy = as.factor(Hybrid.pntp$commodityType)

facility.pp = as.ppp(Hybrid.pntp)

fac.distances = as.data.frame(spatstat::nndist(facility.pp, k = 1, by = myBy))
names(fac.distances) = paste("nn", names(fac.distances), sep=".")

Hybrid.pntp@data = cbind(Hybrid.pntp@data, fac.distances)
Cmap_df = fortify(Counties)

## Regions defined for each Polygons

cStates = fortify(States)

## Regions defined for each Polygons

cPoints = Hybrid.pntp@data %>%
          mutate(pLong = Hybrid.pntp@coords[,1],
                 pLat = Hybrid.pntp@coords[,2])

cPoints$Set = ifelse(cPoints$category == "BY", "Backyard", "Commercial")
ggplot(Cmap_df,
aes(long,lat, group=group)) +
geom_polygon(col="gray70", fill="gray90", size = 0.05) +
geom_polygon(data = cStates,
aes(long,lat, group=group),
col="gray50", fill = "transparent", size = 0.15) +
geom_point(data = cPoints,
aes(pLong, pLat, group=NA),
col="red", pch=1, cex = 0.15) +
xlab(" ") +
ylab(" ") +
facet_wrap(commodityType~Set, ncol=4) +
coord_equal() +
ggtitle("Hybrid Model Poultry") +
theme(panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.background = element_blank(),
plot.background = element_blank(),
panel.border = element_blank(),
strip.text = element_text(size=15, face="bold"),
strip.background = element_blank(),
legend.key.size = unit(1,"line"),
legend.key.width = unit(3,"line"),
legend.text = element_text(size=13, face="bold"),
legend.title = element_text(size=18, face="bold"),
legend.position = "bottom",
axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
plot.title = element_text(size=25, face="bold", hjust = 0.5))
Formula adapted from Prosser et al., 2013:
Prosser, D. J., Hungerford, L. L., Erwin, R. M., Ottinger, M. A., Takekawa, J. Y., & Ellis, E. C. (2013).
Mapping Avian Influenza Transmission Risk at the Interface of Domestic Poultry and Wild Birds.
Frontiers in Public Health, 1(August), 1–11. https://doi.org/10.3389/fpubh.2013.00028
EXAMPLE
Example Level 1 Formula:
Tw-to-Pte Risk = [W_EF * V_wf] * [Pte * U]
Where,
Tw = Wild waterfowl (wild bird models, all species summed)
Pte = Terrestrial poultry (commercial chicken in this example)
W_EF = Week-specific effective waterfowl population, i.e., Tw multiplied by prevalence (0.3 used in this example)
V_wf = Viral shedding rate for waterfowl (10^4.5 used in this example)
U = Poultry virus uptake rate and infection load (10-15/10^3.5 used in this example)
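As a small worked example of this formula for one hypothetical facility (abundance and flock size are made up; parameter settings are taken from the code below; the actual per-facility values are extracted from the rasters in the next step):

#Hypothetical single-facility illustration of the Level 1 formula
T_w  = 25              #wild waterfowl abundance at the facility (birds per square km, made up)
W_EF = T_w * 0.3       #effective waterfowl population (prevalence of 0.3)
V_wf = 10^4.5          #waterfowl viral shedding rate (value used below)
Pte  = 50000           #poultry population at the facility (made up)
U    = 10^15/10^3.5    #uptake rate / infection load term, as set in the code below

Risk = (W_EF * V_wf) * (Pte * U)   #relative, unitless risk index; later rescaled to 0-1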
Find the wild bird abundance at each poultry facility for weeks 20 and 52:
Hybrid.pntp$T_w_20 = extract(Examples.r[["Week_20"]], Hybrid.pntp) #Week 20
Hybrid.pntp$T_w_20[is.na(Hybrid.pntp$T_w_20)] = 0
#range(Hybrid.pntp$T_w_20) #birds/square km

Hybrid.pntp$T_w_52 = extract(Examples.r[["Week_52"]], Hybrid.pntp) #Week 52
Hybrid.pntp$T_w_52[is.na(Hybrid.pntp$T_w_52)] = 0
#range(Hybrid.pntp$T_w_52) #birds/square km
Calculate effective waterfowl population size.
A prevalence of 0.30 is assumed for this example; actual prevalence values are being estimated and mapped as part of the project.
Hybrid.pntp$W_EF_20 = Hybrid.pntp$T_w_20*0.3 #Effective population Week 20
Hybrid.pntp$W_EF_52 = Hybrid.pntp$T_w_52*0.3 #Effective population Week 52
Set Additional Variable Values:
Hybrid.pntp$V_wf = 10^4.5        #Waterfowl virus shedding rate, value from literature
Hybrid.pntp$U    = 10^15/10^3.5  #Poultry uptake rate, values from literature
Apply example Tw to Pte Risk formula:
Hybrid.pntp$Risk_week20 = (Hybrid.pntp$W_EF_20*Hybrid.pntp$V_wf)*(Hybrid.pntp$population*Hybrid.pntp$U) #Week 20

Hybrid.pntp$Risk_week52 = (Hybrid.pntp$W_EF_52*Hybrid.pntp$V_wf)*(Hybrid.pntp$population*Hybrid.pntp$U) #Week 52
Create raster version of study area:
States.union = gUnaryUnion(
                 gBuffer(Counties, width = 0, byid = F))

Domain.r = rasterize(States.union,
                     Examples.r[[1]],
                     field = 0,
                     background = NA)

Domain.r = aggregate(Domain.r, fact = 20) #Very coarse resolution to see whole US
Select Commercial Chicken:
unique(Hybrid.pntp$commodityType)
## [1] "broilers" "layerpullets" "layers" "turkeys"
unique(Hybrid.pntp$category)
## [1] "BY" "COML" "COMH" "COM" "SE"
CommChick = subset(Hybrid.pntp, commodityType == "layers" & category != "BY")
Map Estimated Risk to Raster:
Risk20.r = rasterize(CommChick,
                     Domain.r, "Risk_week20",
                     background = 0)

Risk52.r = rasterize(CommChick,
                     Domain.r, "Risk_week52",
                     background = 0)

Risk.stk = stack(Risk20.r, Risk52.r)
names(Risk.stk) = c("Week.20", "Week.52")

Risk.stk = Risk.stk + Domain.r
Scaling values to the range 0-1 for ease of interpretation:
values(Risk.stk) = Scale(values(Risk.stk))
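Scale() is a helper that is not defined in this script; a minimal min-max rescaler consistent with how it is used here (mapping values onto 0-1) might look like the following. This is an assumption, not necessarily the actual function.

#Hypothetical min-max rescaler; the Scale() used in this script is not shown
Scale = function(x){
  rng = range(x, na.rm = TRUE)
  (x - rng[1]) / (rng[2] - rng[1])
}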
Results by facility.
Cmap_df = fortify(Counties)

## Regions defined for each Polygons

Cmap_df2 = Cmap_df
Cmap_df$variable = "Risk Week 20"
Cmap_df2$variable = "Risk Week 52"
Cmap_df = rbind(Cmap_df, Cmap_df2)

cStates = fortify(States)

## Regions defined for each Polygons

cStates2 = cStates
cStates$variable = "Risk Week 20"
cStates2$variable = "Risk Week 52"
cStates = rbind(cStates, cStates2)

cPoints = CommChick@data %>%
          mutate(pLong = CommChick@coords[,1],
                 pLat = CommChick@coords[,2]) %>%
          select(pLong, pLat, Risk_week20, Risk_week52, category)

cPoints = melt(cPoints, c("pLong", "pLat", "category"))

cPoints$Risk = Scale(as.numeric(cPoints$value))
cPoints$Risk[is.na(cPoints$Risk)] = 0
cPoints$variable = gsub("_", " ", cPoints$variable)
cPoints$variable = gsub("w", "W", cPoints$variable)
cPoints$variable = gsub("20", " 20", cPoints$variable)
cPoints$variable = gsub("52", " 52", cPoints$variable)
ggplot(Cmap_df,
aes(long,lat, group=group)) +
geom_polygon(col="gray70", fill="gray90", size = 0.05) +
geom_polygon(data = cStates,
aes(long,lat, group=group),
col="gray50", fill = "transparent", size = 0.15) +
geom_point(data = cPoints,
aes(pLong, pLat, group=NA, col = Risk, size = Risk),
pch=1) +
scale_color_continuous(low = "blue", high = "red") +
xlab(" ") +
ylab(" ") +
facet_wrap(~variable, ncol=2) +
coord_equal() +
ggtitle("Individual Facility Risk") +
theme(panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.background = element_blank(),
plot.background = element_blank(),
panel.border = element_blank(),
strip.text = element_text(size=15, face="bold"),
strip.background = element_blank(),
legend.key.size = unit(1,"line"),
legend.key.width = unit(3,"line"),
legend.text = element_text(size=13, face="bold"),
legend.title = element_text(size=18, face="bold"),
legend.position = "right",
#legend.direction = "vertical",
axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
plot.title = element_text(size=25, face="bold", hjust = 0.5))
Raster version, shown at very coarse resolution: risk at individual facilities in 1 km x 1 km cells is difficult to see when viewing the entire US.
breaks = seq(0, 1, 0.1)

mCols = brewer.pal(9, "YlOrRd")
mCols = c(rep("white", 2), mCols[3:9])
cr = colorRampPalette(mCols)(length(breaks)-1)
cr = colorRampPalette(cr,
                      bias = 3, space = "rgb")
M0 = levelplot(Risk.stk,
               margin = FALSE,
               layout = c(2, 1),
               xlab = NULL,
               ylab = NULL,
               main = list("Individual Facility Risk", cex = 2, hjust = 0.5, col="gray40"),
               names.attr = c("Week 20", "Week 52"),
               maxpixels = 1e5,
               col.regions = cr, at = breaks,
               colorkey = list(labels = list(at = c(c(seq(0, 1, 0.2))),
                                             labels = c(paste(seq(0, 1, 0.2))), cex = 1.5),
                               labels = list(cex = 12),
                               space = "bottom"),
               par.strip.text = list(fontface = 'bold', cex = 2),
               par.settings = list(axis.line = list(col = "black"),
                                   strip.background = list(col = 'transparent'),
                                   strip.border = list(col = 'transparent')),
               scales = list(cex = 1.5)) +
     latticeExtra::layer(sp.polygons(States, col = "gray40", lwd = 1.5))

M0
Tessellated Area
Defines study area for interpolation.
States.union.ll = spTransform(States.union, LL84)

States.bf = gBuffer(States.union.ll,
                    width = 1,
                    byid = FALSE)

States.bnds = inla.sp2segment(States.bf)

CommChick.ll = spTransform(CommChick, LL84)

MaxEdge = 0.5

mesh0 = inla.mesh.2d(boundary = States.bnds,
                     loc = CommChick.ll,
                     cutoff = 1,
                     max.edge = MaxEdge,
                     min.angle = 21)

MeshLocs = cbind(mesh0$loc[,1], mesh0$loc[,2])

xyz = as.data.frame(
        inla.mesh.map(MeshLocs,
                      projection = "longlat",
                      inverse = TRUE))

true.radius.of.earth = 6371 #Earth radius, km
radius.of.earth = 1

mesh.dom = inla.mesh.create(loc = xyz,
                            cutoff = 1/true.radius.of.earth,
                            refine = list(max.edge = 150/true.radius.of.earth,
                                          min.angle = 26))
Associate facility locations with tessellation
#extract 3D mesh node coordinates and convert them back to long/lat
dd = as.data.frame(cbind(mesh.dom$loc[,1],
                         mesh.dom$loc[,2],
                         mesh.dom$loc[,3]))

dd = as.data.frame(
       inla.mesh.map(dd,
                     projection = "longlat",
                     inverse = FALSE))

names(dd) = c("Long", "Lat")

dd$Set = "Node"
dd$Risk_week20 = 0
dd$Risk_week52 = 0
PD.set = CommChick.ll@data %>%
         mutate(Set = "OBS",
                Long = POINT_X,
                Lat = POINT_Y) %>%
         select(Long, Lat, Risk_week20, Risk_week52, Set)

Spatial.pnts = rbind(PD.set, dd)

locs = cbind(Spatial.pnts$Long, Spatial.pnts$Lat)

locs = inla.mesh.map(locs,
                     projection = "longlat",
                     inverse = TRUE)

A.sp = inla.spde.make.A(mesh.dom,
                        loc = locs)

spde0 = inla.spde2.pcmatern(mesh.dom, alpha = 2,
                            prior.range = c(1, 0.5),
                            prior.sigma = c(1, 0.5),
                            constr = TRUE)

field.sp = inla.spde.make.index("field.sp",
                                spde0$n.spde)
Logit Transform
For ease of interpretation, values were scaled to the range 0 to 1. Because this is a restricted interval, a logit transformation is applied to normalize the data before modeling.
MyTransform = c(Spatial.pnts$Risk_week20, Spatial.pnts$Risk_week52)

MyTransform = Scale(MyTransform)
MyTransform[MyTransform == 0] = 0.00001
MyTransform[MyTransform == 1] = 0.99999
#range(MyTransform)

t.MyTransform = log(MyTransform/(1-MyTransform))
#range(t.MyTransform)
#plot(density(t.MyTransform))

Spatial.pnts$t.Risk20 = t.MyTransform[1:dim(Spatial.pnts)[1]]
Spatial.pnts$t.Risk52 = t.MyTransform[(dim(Spatial.pnts)[1]+1):length(t.MyTransform)]
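Model output is produced on this logit scale; to read predictions back on the 0-1 risk scale they can be passed through the inverse logit. A minimal sketch (not part of the original workflow; the fitted-value access in the comment is a hypothetical usage of the INLA results created below):

inv.logit = function(x) exp(x)/(1 + exp(x))  #inverse logit (expit), maps back to the 0-1 scale

#e.g., back-transform fitted values from the Week 20 model (hypothetical usage)
#risk20.fit = inv.logit(SP.20$summary.fitted.values$mean)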
Organize Data for Model:
FE.lst = list(c(field.sp,
                list(intercept1 = 1)),
              list(NN = Spatial.pnts[,"Long"]))

Sp20.stk = inla.stack(data = list(Y = Spatial.pnts$t.Risk20), #Week 20 Risk
                      A = list(A.sp, 1),
                      effects = FE.lst,
                      tag = "sp.20")

Sp52.stk = inla.stack(data = list(Y = Spatial.pnts$t.Risk52), #Week 52 Risk
                      A = list(A.sp, 1),
                      effects = FE.lst,
                      tag = "sp.52")
Frm.J = Y ~ -1 + intercept1 +
        f(field.sp,
          model = spde0)

SP.20 = inla(Frm.J,
             data = inla.stack.data(Sp20.stk),
             family = "gaussian",
             verbose = FALSE,
             control.fixed = list(prec = 0.01,
                                  prec.intercept = 0.001),
             control.predictor = list(
                        A = inla.stack.A(Sp20.stk),
                        compute = TRUE,
                        link = 1),
             #control.mode = list(restart = TRUE, theta = theta.jnc),
             control.inla = list(strategy = "gaussian",
                                 int.strategy = "eb"),
             control.results = list(return.marginals.random = TRUE,
                                    return.marginals.predictor = TRUE),
             control.compute = list(dic = TRUE, cpo = TRUE, waic = TRUE))

SP.52 = inla(Frm.J,
             data = inla.stack.data(Sp52.stk),
             family = "gaussian",
             verbose = FALSE,
             control.fixed = list(prec = 0.01,
                                  prec.intercept = 0.001),
             control.predictor = list(
                        A = inla.stack.A(Sp52.stk),
                        compute = TRUE,
                        link = 1),
             #control.mode = list(restart = TRUE, theta = theta.jnc),
             control.inla = list(strategy = "gaussian",
                                 int.strategy = "eb"),
             control.results = list(return.marginals.random = TRUE,
                                    return.marginals.predictor = TRUE),
             control.compute = list(dic = TRUE, cpo = TRUE, waic = TRUE))
Create point grid for mapping results
Domain.r.ll = projectRaster(Domain.r, crs = CRS(proj4string(CommChick.ll)))

Domain.r.ll = disaggregate(Domain.r.ll, fact = 10)

Grd.pnt = rasterToPoints(Domain.r.ll, sp = TRUE)

Grd.pnt@data = Grd.pnt@data %>%
               mutate(Long = Grd.pnt@coords[,1],
                      Lat = Grd.pnt@coords[,2]) %>%
               select(-layer)
Map Results:
Map.pnts = Grd.pnt
ModResult = SP.20
ModResult2 = SP.52
ModMesh = mesh.dom

pLoc = cbind(Map.pnts$Long, Map.pnts$Lat)

pLoc = inla.mesh.map(pLoc,
                     projection = "longlat",
                     inverse = TRUE)

Ap = inla.spde.make.A(ModMesh, loc = pLoc)

Map.pnts$sRisk.Wk20 = (drop(Ap %*% ModResult$summary.random$field.sp$mean))
Map.pnts$sRisk.Wk52 = (drop(Ap %*% ModResult2$summary.random$field.sp$mean))

sRisk_Wk20.r = rasterize(Map.pnts,
                         Domain.r.ll, "sRisk.Wk20",
                         background = NA)

sRisk_Wk52.r = rasterize(Map.pnts,
                         Domain.r.ll, "sRisk.Wk52",
                         background = NA)
View Risk with Spatial Smoothing
Risk.stk.sp = stack(sRisk_Wk20.r, sRisk_Wk52.r)
values(Risk.stk.sp) = Scale(values(Risk.stk.sp))

States.ll = spTransform(States, LL84)

breaks = seq(0, 1, 0.01)

#mCols = rev(viridis(1000)) #brewer.pal(9, "YlOrRd")
#mCols = c(rep("white",2), mCols[3:9])

mCols = rev(inferno(1000))
mCols = c(rep("white", 49), mCols[50:1000])
cr = colorRampPalette(mCols)(length(breaks)-1)
cr = colorRampPalette(cr,
                      bias = 0.5, space = "rgb")
M0 = levelplot(Risk.stk.sp,
               margin = FALSE,
               layout = c(2, 1),
               xlab = NULL,
               ylab = NULL,
               names.attr = c("Week 20", "Week 52"),
               main = list("Spatially Smoothed Risk", cex = 2, hjust = 0.5, col="gray40"),
               maxpixels = 1e5,
               col.regions = cr, at = breaks,
               colorkey = list(labels = list(at = c(c(seq(0, 1, 0.2))),
                                             labels = c(paste(seq(0, 1, 0.2))), cex = 1.5),
                               labels = list(cex = 12),
                               space = "bottom"),
               par.strip.text = list(fontface = 'bold', cex = 2),
               par.settings = list(axis.line = list(col = "black"),
                                   strip.background = list(col = 'transparent'),
                                   strip.border = list(col = 'transparent')),
               scales = list(cex = 1.5)) +
     latticeExtra::layer(sp.polygons(States.ll, col = "gray40", lwd = 1.5))

M0