This code serves as an example of models and analyses developed to better understand spatial and temporal trends in the risk of avian influenza transmission between wild waterfowl and domestic poultry. This study is in its early stages; results should be considered preliminary and exploratory.
CITATIONS REFERENCED IN THIS SCRIPT
Story Board: Avian Influenza in the United States:
https://usgs.maps.arcgis.com/apps/MapJournal/index.html?appid=37f0eacdaccf4c2fbe5f65535ddc7e8c
Prosser, D. J., Hungerford, L. L., Erwin, R. M., Ottinger, M. A., Takekawa, J. Y., & Ellis, E. C. (2013).
Mapping Avian Influenza Transmission Risk at the Interface of Domestic Poultry and Wild Birds.
Frontiers in Public Health, 1(August), 1–11. https://doi.org/10.3389/fpubh.2013.00028
Humphreys, J.M., Murrow, J.L., Sullivan, J.D., and Prosser, D.J., 2019, Seasonal occurrence and abundance of dabbling ducks across the continental United States: Joint spatio-temporal modeling for the genus Anas. Diversity and Distributions. https://onlinelibrary.wiley.com/doi/full/10.1111/ddi.12960
Humphreys, J.M., Murrow, J.L., Sullivan, J.D., and Prosser, D.J., 2019, Spatio-temporal distribution models for dabbling duck species across the continental United States: U.S. Geological Survey data release, https://doi.org/10.5066/P9DDR3OB.
Patyk, K. A., McCool-Eye, M. J., South, D. D., Burdett, C. L., Maroney, S. A., Fox, A., Kuiper, G., & Magzamen, S. (2020). Modelling the domestic poultry population in the United States: A novel approach leveraging remote sensing and synthetic data methods. Geospatial Health, 15(2). https://doi.org/10.4081/gh.2020.913
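The library() calls are not included in this excerpt; the list below is an approximation of the packages the code appears to rely on, inferred from the functions used (an assumption, not the original setup code).
# Approximate package list (assumed; original library() calls not shown).
library(sp); library(rgdal); library(rgeos)                  # vector data, projections, buffers
library(maps); library(maptools)                             # state outlines
library(raster)                                              # raster stacks and algebra
library(dplyr); library(reshape2); library(lubridate)        # data wrangling, melt(), week()
library(ggplot2); library(RColorBrewer)                      # study area and facility maps
library(rasterVis); library(latticeExtra); library(viridis)  # levelplot() raster maps
library(spatstat)                                            # nearest-neighbor distances
library(INLA)                                                # SPDE spatial smoothing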
Define the domain boundaries for the study area.
LL84 = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
Counties = readOGR(dsn = "D:/County_WNV/Arc/Counties",
layer = "Counties4",
stringsAsFactors = FALSE)
## OGR data source with driver: ESRI Shapefile
## Source: "D:\County_WNV\Arc\Counties", layer: "Counties4"
## with 3109 features
## It has 73 fields
## Integer64 fields read as strings: OBJECTID Pop2010 Pop_18_24 HS18_24tot BD18_24tot Pop_25up HS25up_tot BD25up_tot Pop_18up HS18up_tot BD18up_tot
#States
LL84 = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
States = map("state",
fill = TRUE,
plot = FALSE)
IDs = sapply(strsplit(States$names, ":"),
function(x) x[1])
States = map2SpatialPolygons(States, IDs = IDs,
proj4string = CRS(LL84))
pid = sapply(slot(States, "polygons"),
function(x) slot(x, "ID"))
p.df = data.frame(ID=1:length(States),
row.names = pid)
States = SpatialPolygonsDataFrame(States, p.df)
States = spTransform(States, CRS(proj4string(Counties)))
Cmap_df = fortify(Counties)
## Regions defined for each Polygons
cStates = fortify(States)
## Regions defined for each Polygons
ggplot(Cmap_df,
aes(long,lat, group=group)) +
geom_polygon(col="gray70", fill="gray90", size = 0.05) +
geom_polygon(data = cStates,
aes(long,lat, group=group),
col="gray50", fill = "transparent", size = 0.15) +
xlab(" ") +
ylab(" ") +
coord_equal() +
ggtitle("Study Area") +
theme(panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.background = element_blank(),
plot.background = element_blank(),
panel.border = element_blank(),
strip.text = element_text(size=15, face="bold"),
strip.background = element_blank(),
legend.key.size = unit(1,"line"),
legend.key.width = unit(3,"line"),
legend.text = element_text(size=13, face="bold"),
legend.title = element_text(size=18, face="bold"),
legend.position = "bottom",
axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
plot.title = element_text(size=25, face="bold", hjust = 0.5))

Wild bird occurrence and abundance were originally modeled by season (spring, breeding, fall, winter). To produce weekly risk estimates, the seasonal abundance estimates must be disaggregated to weekly time steps. To do this, the eBird observation data originally used to fit the seasonal models are used to estimate the weekly probability of birds occurring at a given latitude.
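As a minimal illustration of that weighting (hypothetical values; the seasonal loops below apply the same calculation to every raster cell): for a given week, the mean and standard deviation of observed eBird latitudes are computed, each raster cell's latitude is converted to a z-score, and the upper-tail normal probability is used as a weight on the seasonal abundance surface.
# Illustration only (hypothetical latitudes). In the loops below, the weekly
# eBird latitudes define a mean and (population) SD; each cell's latitude is
# converted to a z-score, and pnorm(..., lower.tail = FALSE) gives a weight
# that is larger for cells south of the weekly mean latitude.
obs.lat  = c(30.2, 31.5, 33.0, 29.8, 32.4)  # hypothetical eBird latitudes for one week
cell.lat = 35.0                             # hypothetical raster cell latitude
z = (cell.lat - mean(obs.lat)) / sd(obs.lat)
w = pnorm(z, lower.tail = FALSE)            # weight multiplied against seasonal abundance
w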
Bird Abundance estimates.
Season files available as part of a USGS data release:
Humphreys, J.M., Murrow, J.L., Sullivan, J.D., and Prosser, D.J., 2019, Spatio-temporal distribution models for dabbling duck species across the continental United States: U.S. Geological Survey data release, https://doi.org/10.5066/P9DDR3OB.
Copies have also been saved locally.
Create a raster stack from seasonal wild bird abundance estimates:
US.abun = list.files(path="D:/Dabbler_Pre/Dabbler_project/Raster_outputs",
pattern = "abun_", full.names = TRUE) #file names
Spp.lst = substr(US.abun, 52, 55)
unique(Spp.lst)
## [1] "ambd" "amew" "buwt" "cint" "gadw" "gnwt" "mall" "motd" "norp" "nors"
## [11] "wood"
Seas.lst = substr(US.abun, 57, 60)
unique(Seas.lst)
## [1] "Bree" "Fall" "Spri" "Wint"
Call.df = as.data.frame(
cbind(
Path = US.abun,
Spp = Spp.lst,
Seas = Seas.lst
)
)
Abund.stk = raster::stack(US.abun) #Stack rasters

Load eBird observations for all species:
eBird.call = list.files(path="D:/Dabbler_Pre/Dabbler_project/eBird_data/All_Observations",
full.names = TRUE)
eCall.df = as.data.frame(
cbind(
Path = eBird.call,
Spp = substr(eBird.call, 64, 67)))

Create Weekly rasters for WINTER
Winter includes weeks 50-53 and 1-10.
CleanBird() is a custom function to clean eBird data. It removes unneeded columns, checks coordinates, etc.
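The CleanBird() definition itself is not included in this excerpt. Based on how it is called (CleanBird(path, "eTxt", 2016, 2017)) and on the columns used afterwards (Date, Lat), a stand-in might look roughly like the sketch below; the eBird Basic Dataset column names are an assumption.
# Rough stand-in for the custom CleanBird() (assumption; not the original code).
# It reads an eBird text file, keeps Date/Lat/Long, drops rows with invalid
# coordinates, restricts records to the requested years, and assigns the result
# to the supplied name (e.g., "eTxt") so the loops below can use it.
CleanBird = function(path, name, yr.start, yr.end) {
  obs = read.delim(path, stringsAsFactors = FALSE, quote = "")
  obs = data.frame(Date = as.Date(obs$OBSERVATION.DATE),   # assumed column name
                   Lat  = as.numeric(obs$LATITUDE),        # assumed column name
                   Long = as.numeric(obs$LONGITUDE))       # assumed column name
  obs = obs[!is.na(obs$Lat) & !is.na(obs$Long) &
            abs(obs$Lat) <= 90 & abs(obs$Long) <= 180, ]   # coordinate checks
  obs = obs[format(obs$Date, "%Y") %in% as.character(yr.start:yr.end), ]
  assign(name, obs, envir = .GlobalEnv)
  invisible(obs)
}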
Spp.vect = levels(as.factor(Call.df$Spp))
for(i in 1:length(Spp.vect)){
loop.spp = Spp.vect[i]
sppCall = Call.df %>% filter(Spp == loop.spp & Seas == "Wint")
Wint.rast = raster(paste(sppCall$Path))
Spp.df = eCall.df %>% filter(Spp == loop.spp)
CleanBird(paste(Spp.df$Path), "eTxt", 2016, 2017)
eTxt$Week = week(eTxt$Date)
eTxt = eTxt %>% filter(Week >= 50 | Week < 10)
head(eTxt)
Week.lvls = levels(factor(eTxt$Week))
NORS.grd = rasterToPoints(Wint.rast, sp = TRUE)
NORS.grd@data = NORS.grd@data %>%
mutate(Long = NORS.grd@coords[,1],
Lat = NORS.grd@coords[,2])
for(j in 1:length(Week.lvls)){
weekly.obs = eTxt %>% filter(Week == Week.lvls[j])
lat.mean = mean(weekly.obs$Lat)
lat.mean
lat.sd = sd(weekly.obs$Lat)*sqrt((length(weekly.obs$Lat)-1)/(length(weekly.obs$Lat)))
NORS.grd$Zs = (NORS.grd$Lat - lat.mean)/lat.sd
NORS.grd$Prob = pnorm(NORS.grd$Zs, lower.tail = FALSE)
tmp.r = rasterize(NORS.grd,
Wint.rast,
"Prob",
background = NA)
tmp2.r = tmp.r*Wint.rast
Label = paste(loop.spp, Week.lvls[j], sep="_")
writeRaster(tmp2.r, paste("D:/Interface_USDA/National/", Label,".tif", sep=""))
}
}

Create Weekly rasters for SPRING
Spp.vect = levels(as.factor(Call.df$Spp))
for(i in 1:length(Spp.vect)){
loop.spp = Spp.vect[i]
sppCall = Call.df %>% filter(Spp == loop.spp & Seas == "Spri")
Wint.rast = raster(paste(sppCall$Path))
Spp.df = eCall.df %>% filter(Spp == loop.spp)
CleanBird(paste(Spp.df$Path), "eTxt", 2016, 2017)
eTxt$Week = week(eTxt$Date)
eTxt = eTxt %>% filter(Week >= 10 & Week < 20)
head(eTxt)
Week.lvls = levels(factor(eTxt$Week))
NORS.grd = rasterToPoints(Wint.rast, sp = TRUE)
NORS.grd@data = NORS.grd@data %>%
mutate(Long = NORS.grd@coords[,1],
Lat = NORS.grd@coords[,2])
for(j in 1:length(Week.lvls)){
weekly.obs = eTxt %>% filter(Week == Week.lvls[j])
lat.mean = mean(weekly.obs$Lat)
lat.mean
lat.sd = sd(weekly.obs$Lat)*sqrt((length(weekly.obs$Lat)-1)/(length(weekly.obs$Lat)))
NORS.grd$Zs = (NORS.grd$Lat - lat.mean)/lat.sd
NORS.grd$Prob = pnorm(NORS.grd$Zs, lower.tail = FALSE)
tmp.r = rasterize(NORS.grd,
Wint.rast,
"Prob",
background = NA)
tmp2.r = tmp.r*Wint.rast
Label = paste(loop.spp, Week.lvls[j], sep="_")
writeRaster(tmp2.r, paste("D:/Interface_USDA/National/", Label,".tif", sep=""), overwrite=TRUE)
}
}

Create Weekly rasters for BREEDING season
Spp.vect = levels(as.factor(Call.df$Spp))
for(i in 1:length(Spp.vect)){
loop.spp = Spp.vect[i]
sppCall = Call.df %>% filter(Spp == loop.spp & Seas == "Bree")
Wint.rast = raster(paste(sppCall$Path))
Spp.df = eCall.df %>% filter(Spp == loop.spp)
CleanBird(paste(Spp.df$Path), "eTxt", 2016, 2017)
eTxt$Week = week(eTxt$Date)
eTxt = eTxt %>% filter(Week >= 20 & Week < 36)
head(eTxt)
Week.lvls = levels(factor(eTxt$Week))
NORS.grd = rasterToPoints(Wint.rast, sp = TRUE)
NORS.grd@data = NORS.grd@data %>%
mutate(Long = NORS.grd@coords[,1],
Lat = NORS.grd@coords[,2])
for(j in 1:length(Week.lvls)){
weekly.obs = eTxt %>% filter(Week == Week.lvls[j])
lat.mean = mean(weekly.obs$Lat)
lat.mean
lat.sd = sd(weekly.obs$Lat)*sqrt((length(weekly.obs$Lat)-1)/(length(weekly.obs$Lat)))
NORS.grd$Zs = (NORS.grd$Lat - lat.mean)/lat.sd
NORS.grd$Prob = pnorm(NORS.grd$Zs, lower.tail = FALSE)
tmp.r = rasterize(NORS.grd,
Wint.rast,
"Prob",
background = NA)
tmp2.r = tmp.r*Wint.rast
Label = paste(loop.spp, Week.lvls[j], sep="_")
writeRaster(tmp2.r, paste("D:/Interface_USDA/National/", Label,".tif", sep=""), overwrite=TRUE)
}
}

Create Weekly rasters for FALL
Spp.vect = levels(as.factor(Call.df$Spp))
for(i in 1:length(Spp.vect)){
loop.spp = Spp.vect[i]
sppCall = Call.df %>% filter(Spp == loop.spp & Seas == "Fall")
Wint.rast = raster(paste(sppCall$Path))
Spp.df = eCall.df %>% filter(Spp == loop.spp)
CleanBird(paste(Spp.df$Path), "eTxt", 2016, 2017)
eTxt$Week = week(eTxt$Date)
eTxt = eTxt %>% filter(Week >= 36 & Week < 50)
head(eTxt)
Week.lvls = levels(factor(eTxt$Week))
NORS.grd = rasterToPoints(Wint.rast, sp = TRUE)
NORS.grd@data = NORS.grd@data %>%
mutate(Long = NORS.grd@coords[,1],
Lat = NORS.grd@coords[,2])
for(j in 1:length(Week.lvls)){
weekly.obs = eTxt %>% filter(Week == Week.lvls[j])
lat.mean = mean(weekly.obs$Lat)
lat.mean
lat.sd = sd(weekly.obs$Lat)*sqrt((length(weekly.obs$Lat)-1)/(length(weekly.obs$Lat)))
NORS.grd$Zs = (NORS.grd$Lat - lat.mean)/lat.sd
NORS.grd$Prob = pnorm(NORS.grd$Zs, lower.tail = FALSE)
tmp.r = rasterize(NORS.grd,
Wint.rast,
"Prob",
background = NA)
tmp2.r = tmp.r*Wint.rast
Label = paste(loop.spp, Week.lvls[j], sep="_")
writeRaster(tmp2.r, paste("D:/Interface_USDA/National/", Label,".tif", sep=""), overwrite=TRUE)
}
}

Collate weekly rasters to estimate total abundance across all species.
Comb.abun = list.files(path="D:/Interface_USDA/National",
full.names = TRUE)
Week.lst = sub('.*_', "", Comb.abun)
Week.lst = gsub(".tif", "", Week.lst)
Week.df = as.data.frame(
cbind(
Path = Comb.abun,
Week = Week.lst
)
)
Week.df$WeekI = as.integer(as.character(Week.df$Week))
myWeeks = unique(Week.lst)

for(i in 1:length(myWeeks)){
Week.set = Week.df %>% filter(Week == myWeeks[i])
Weeks.r = stack(paste(Week.set$Path))
Weeks.r = sum(Weeks.r)
writeRaster(Weeks.r, paste("D:/Interface_USDA/Wild_sums/WS_Week_", myWeeks[i],".tif", sep=""), overwrite=TRUE)
}

Stack total abundance rasters created above.
Weekly_Abun = list.files(path="D:/Interface_USDA/Wild_sums",
full.names = TRUE)
Weekly_Abun = stack(Weekly_Abun)
dim(Weekly_Abun)
## [1] 970 2306 53
names(Weekly_Abun) = paste("Week", myWeeks, sep="_")

Check Result
Plot total abundance for Weeks 20 (SPRING) and 52 (WINTER)
Check.Wk20.r = Weekly_Abun[["Week_20"]]
Check.Wk52.r = Weekly_Abun[["Week_52"]]
Examples.r = stack(Check.Wk20.r, Check.Wk52.r)
Examples.r = projectRaster(Examples.r, crs = CRS(proj4string(States)))

Examples.r
## class : RasterBrick
## dimensions : 2178, 5288, 11517264, 2 (nrow, ncol, ncell, nlayers)
## resolution : 1100, 1400 (x, y)
## extent : -2895333, 2921467, -1389061, 1660139 (xmin, xmax, ymin, ymax)
## crs : +proj=aea +lat_0=37.5 +lon_0=-96 +lat_1=29.5 +lat_2=45.5 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs
## source : memory
## names : Week_20, Week_52
## min values : 0, 0
## max values : 293.43400, 64.54171
breaks = seq(0, 300, 0.1)
mCols = brewer.pal(9, "YlOrRd")
cr = colorRampPalette(mCols)(length(breaks)-1)
cr = colorRampPalette(cr, bias = 3, space = "rgb")
M0 = levelplot(Examples.r,
margin = FALSE,
layout=c(2, 1),
xlab = " ",
ylab = NULL,
#sub=" ",
main=list("Total Abundance (all species)", cex = 2, hjust = 0.5, col="gray40"),
#xlab.top = "C ", cex=20,
names.attr = c("Week 20", "Week 52"),
maxpixels = 1e5,
col.regions = cr, at = breaks,
colorkey = list(labels=list(at=c(seq(0,300, 50)),
labels=c(seq(0,300, 50)), cex=1.5),
labels=list(cex=12),
space = "bottom"),
par.strip.text = list(fontface='bold', cex=2),
par.settings = list(axis.line = list(col = "black"),
strip.background = list(col = 'transparent'),
strip.border = list(col = 'transparent')),
scales = list(cex = 1.5)) +
latticeExtra::layer(sp.polygons(States, col = "darkgray", lwd = 0.5))
M0

Poultry data citation:
Patyk, K. A., McCool-Eye, M. J., South, D. D., Burdett, C. L., Maroney, S. A., Fox, A., Kuiper, G., & Magzamen, S. (2020). Modelling the domestic poultry population in the United States: A novel approach leveraging remote sensing and synthetic data methods. Geospatial Health, 15(2). https://doi.org/10.4081/gh.2020.913
Load data, convert to spatial points
nProj = proj4string(States)
Broilers.HM = read.csv("C:/Users/humph173/Documents/LabWork/Patuxent/HybridModel/broilers_hybrid_FINAL.csv",
stringsAsFactors=FALSE) %>%
select(POINT_X, POINT_Y, population, commodityType, category)
BYPullets.HM = read.csv("C:/Users/humph173/Documents/LabWork/Patuxent/HybridModel/layerpullets_hybrid_FINAL.csv",
stringsAsFactors=FALSE) %>%
mutate(POINT_X = longitude,
POINT_Y = latitude,
category = Category) %>%
select(POINT_X, POINT_Y, population, commodityType, category)
Layers.HM = read.csv("C:/Users/humph173/Documents/LabWork/Patuxent/HybridModel/layers_hybrid_FINAL.csv",
stringsAsFactors=FALSE) %>%
select(POINT_X, POINT_Y, population, commodityType, category)
Turkey.HM = read.csv("C:/Users/humph173/Documents/LabWork/Patuxent/HybridModel/turkeys_hybrid_FINAL.csv",
stringsAsFactors=FALSE) %>%
select(POINT_X, POINT_Y, population, commodityType, category)
Hybrid.df = rbind(Broilers.HM, BYPullets.HM, Layers.HM, Turkey.HM) #
Hybrid.pnt = SpatialPointsDataFrame(Hybrid.df[,c("POINT_X", "POINT_Y")], Hybrid.df)
proj4string(Hybrid.pnt) = LL84
Hybrid.pntp = spTransform(Hybrid.pnt, nProj)

Calculate distance to nearest neighbor by commodity type.
unique(Hybrid.pntp$commodityType)
## [1] "broilers" "layerpullets" "layers" "turkeys"
myBy = as.factor(Hybrid.pntp$commodityType)
facility.pp = as.ppp(Hybrid.pntp)
fac.distances = as.data.frame(spatstat::nndist(facility.pp, k= 1, by = myBy))
names(fac.distances) = paste("nn", names(fac.distances), sep=".")
Hybrid.pntp@data = cbind(Hybrid.pntp@data, fac.distances)

Cmap_df = fortify(Counties)
## Regions defined for each Polygons
cStates = fortify(States)
## Regions defined for each Polygons
cPoints = Hybrid.pntp@data %>%
mutate(pLong = Hybrid.pntp@coords[,1],
pLat = Hybrid.pntp@coords[,2])
cPoints$Set = ifelse(cPoints$category == "BY", "Backyard", "Commercial")
ggplot(Cmap_df,
aes(long,lat, group=group)) +
geom_polygon(col="gray70", fill="gray90", size = 0.05) +
geom_polygon(data = cStates,
aes(long,lat, group=group),
col="gray50", fill = "transparent", size = 0.15) +
geom_point(data = cPoints,
aes(pLong, pLat, group=NA),
col="red", pch=1, cex = 0.15) +
xlab(" ") +
ylab(" ") +
facet_wrap(commodityType~Set, ncol=4) +
coord_equal() +
ggtitle("Hybrid Model Poultry") +
theme(panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.background = element_blank(),
plot.background = element_blank(),
panel.border = element_blank(),
strip.text = element_text(size=15, face="bold"),
strip.background = element_blank(),
legend.key.size = unit(1,"line"),
legend.key.width = unit(3,"line"),
legend.text = element_text(size=13, face="bold"),
legend.title = element_text(size=18, face="bold"),
legend.position = "bottom",
axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
plot.title = element_text(size=25, face="bold", hjust = 0.5))

Formula adapted from Prosser et al., 2013:
Prosser, D. J., Hungerford, L. L., Erwin, R. M., Ottinger, M. A., Takekawa, J. Y., & Ellis, E. C. (2013).
Mapping Avian Influenza Transmission Risk at the Interface of Domestic Poultry and Wild Birds.
Frontiers in Public Health, 1(August), 1–11. https://doi.org/10.3389/fpubh.2013.00028
EXAMPLE
Example Level 1 Formula:
T_w to P_te = [W_EF * V_wf] * [P_te * U]

Where,

T_w = wild waterfowl (wild bird models, all species summed)
P_te = terrestrial poultry (commercial chickens in this example)
W_EF = week-specific effective waterfowl population (prevalence; 0.3 used in this example)
V_wf = viral shedding rate for waterfowl (10^4.5 used in this example)
U = poultry virus uptake rate and infection load (10^-15/10^3.5 used in this example)
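A small worked illustration of the formula with hypothetical facility values; the constants follow those set in the example code further below, and the absolute magnitude is not meaningful because risk estimates are rescaled to 0-1 later.
# Worked example of the Level 1 formula (hypothetical facility values).
T_w  = 50              # hypothetical wild waterfowl abundance at one facility
P_te = 20000           # hypothetical poultry population at that facility
W_EF = T_w * 0.3       # effective waterfowl population (example prevalence 0.3)
V_wf = 10^4.5          # waterfowl viral shedding rate (example value)
U    = 10^15/10^3.5    # poultry uptake rate/infection load (as set in the code below)
Risk = (W_EF * V_wf) * (P_te * U)
Risk                   # relative value only; risk estimates are rescaled to 0-1 later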
Find the wild bird abundance at each poultry facility for weeks 20 and 52:
Hybrid.pntp$T_w_20 = extract(Examples.r[["Week_20"]], Hybrid.pntp) #Week 20
Hybrid.pntp$T_w_20[is.na(Hybrid.pntp$T_w_20)] = 0
#range(Hybrid.pntp$T_w_20) #birds/square km
Hybrid.pntp$T_w_52 = extract(Examples.r[["Week_52"]], Hybrid.pntp) #Week 52
Hybrid.pntp$T_w_52[is.na(Hybrid.pntp$T_w_52)] = 0
#range(Hybrid.pntp$T_w_52) #birds/square km

Calculate effective waterfowl population size.
A prevalence of 0.30 is assumed for this example; actual values are being estimated and mapped as part of the project.
Hybrid.pntp$W_EF_20 = Hybrid.pntp$T_w_20*0.3 #Effective population Week 20
Hybrid.pntp$W_EF_52 = Hybrid.pntp$T_w_52*0.3 #Effective population Week 52

Set Additional Variable Values:
Hybrid.pntp$V_wf = 10^4.5 #Waterfowl virus shedding rate, value from literature
Hybrid.pntp$U = 10^15/10^3.5 #Poultry uptake rate, values from literature

Apply example T_w to P_te risk formula:
Hybrid.pntp$Risk_week20 = (Hybrid.pntp$W_EF_20*Hybrid.pntp$V_wf)*(Hybrid.pntp$population*Hybrid.pntp$U) #Week 20
Hybrid.pntp$Risk_week52 = (Hybrid.pntp$W_EF_52*Hybrid.pntp$V_wf)*(Hybrid.pntp$population*Hybrid.pntp$U) #Week 52

Create raster version of study area:
States.union = gUnaryUnion(
gBuffer(Counties, width=0, byid = F))
Domain.r = rasterize(States.union,
Examples.r[[1]],
field = 0,
background = NA)
Domain.r = aggregate(Domain.r, fact=20) #Very coarse resolution to see whole US

Select Commercial Chicken:
unique(Hybrid.pntp$commodityType)
## [1] "broilers" "layerpullets" "layers" "turkeys"
unique(Hybrid.pntp$category)
## [1] "BY" "COML" "COMH" "COM" "SE"
CommChick = subset(Hybrid.pntp, commodityType == "layers" & category != "BY" )

Map Estimated Risk to Raster:
Risk20.r = rasterize(CommChick,
Domain.r,
"Risk_week20",
background = 0)
Risk52.r = rasterize(CommChick,
Domain.r,
"Risk_week52",
background = 0)
Risk.stk = stack(Risk20.r, Risk52.r)
names(Risk.stk) = c("Week.20", "Week.52")
Risk.stk = Risk.stk + Domain.r

Scaling values to the range 0-1 for ease of interpretation:
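Scale() is a helper that is not defined in this excerpt; a minimal min-max rescaling to the 0-1 interval (an assumed stand-in, not the original function) would be:
# Assumed stand-in for the undefined Scale() helper: min-max rescale to [0, 1],
# ignoring NA cells.
Scale = function(x) {
  (x - min(x, na.rm = TRUE)) / (max(x, na.rm = TRUE) - min(x, na.rm = TRUE))
}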
values(Risk.stk) = Scale(values(Risk.stk))

Results by facility.
Cmap_df = fortify(Counties)
## Regions defined for each Polygons
Cmap_df2 = Cmap_df
Cmap_df$variable = "Risk Week 20"
Cmap_df2$variable = "Risk Week 52"
Cmap_df = rbind(Cmap_df, Cmap_df2)
cStates = fortify(States)
## Regions defined for each Polygons
cStates2 = cStates
cStates$variable = "Risk Week 20"
cStates2$variable = "Risk Week 52"
cStates = rbind(cStates, cStates2)
cPoints = CommChick@data %>%
mutate(pLong = CommChick@coords[,1],
pLat = CommChick@coords[,2]) %>%
select(pLong, pLat, Risk_week20, Risk_week52, category)
cPoints = melt(cPoints, c("pLong", "pLat", "category"))
cPoints$Risk = Scale(as.numeric(cPoints$value))
cPoints$Risk[is.na(cPoints$Risk)] = 0
cPoints$variable = gsub("_", " ", cPoints$variable)
cPoints$variable = gsub("w", "W", cPoints$variable)
cPoints$variable = gsub("20", " 20", cPoints$variable)
cPoints$variable = gsub("52", " 52", cPoints$variable)
ggplot(Cmap_df,
aes(long,lat, group=group)) +
geom_polygon(col="gray70", fill="gray90", size = 0.05) +
geom_polygon(data = cStates,
aes(long,lat, group=group),
col="gray50", fill = "transparent", size = 0.15) +
geom_point(data = cPoints,
aes(pLong, pLat, group=NA, col = Risk, size = Risk),
pch=1) +
scale_color_continuous(low = "blue", high = "red") +
xlab(" ") +
ylab(" ") +
facet_wrap(~variable, ncol=2) +
coord_equal() +
ggtitle("Individual Facility Risk") +
theme(panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.background = element_blank(),
plot.background = element_blank(),
panel.border = element_blank(),
strip.text = element_text(size=15, face="bold"),
strip.background = element_blank(),
legend.key.size = unit(1,"line"),
legend.key.width = unit(3,"line"),
legend.text = element_text(size=13, face="bold"),
legend.title = element_text(size=18, face="bold"),
legend.position = "right",
#legend.direction = "vertical",
axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
plot.title = element_text(size=25, face="bold", hjust = 0.5))

Raster version, shown at very coarse resolution: risk at individual facilities within a 1 km x 1 km cell is difficult to see when viewing the entire US.
breaks = seq(0, 1, 0.1)
mCols = brewer.pal(9, "YlOrRd")
mCols = c(rep("white",2), mCols[3:9])
cr = colorRampPalette(mCols)(length(breaks)-1)
cr = colorRampPalette(cr,
bias = 3, space = "rgb")
M0 = levelplot(Risk.stk,
margin = FALSE,
layout=c(2, 1),
xlab = NULL,
ylab = NULL,
main=list("Individual Facility Risk", cex = 2, hjust = 0.5, col="gray40"),
names.attr = c("Week 20", "Week 52"),
maxpixels = 1e5,
col.regions = cr, at = breaks,
colorkey = list(labels=list(at=c(c(seq(0, 1, 0.2))),
labels=c(paste(seq(0, 1, 0.2))), cex=1.5),
labels=list(cex=12),
space = "bottom"),
par.strip.text = list(fontface='bold', cex=2),
par.settings = list(axis.line = list(col = "black"),
strip.background = list(col = 'transparent'),
strip.border = list(col = 'transparent')),
scales = list(cex = 1.5)) +
latticeExtra::layer(sp.polygons(States, col = "gray40", lwd = 1.5))
M0

Tessellated Area
Defines study area for interpolation.
States.union.ll = spTransform(States.union, LL84)
States.bf = gBuffer(States.union.ll,
width = 1,
byid = FALSE)
States.bnds = inla.sp2segment(States.bf)
CommChick.ll = spTransform(CommChick, LL84)
MaxEdge = 0.5
mesh0 = inla.mesh.2d(boundary = States.bnds,
loc = CommChick.ll,
cutoff = 1,
max.edge = MaxEdge,
min.angle = 21)
MeshLocs = cbind(mesh0$loc[,1], mesh0$loc[,2])
xyz = as.data.frame(
inla.mesh.map(MeshLocs,
projection = "longlat",
inverse = TRUE))
true.radius.of.earth = 6371 #Earth radius, km
radius.of.earth = 1
mesh.dom = inla.mesh.create(loc = xyz,
cutoff = 1/true.radius.of.earth,
refine=list(max.edge = 150/true.radius.of.earth,
min.angle = 26))

Associate facility locations with tessellation
#convert 3D mesh node coordinates back to longitude/latitude
dd = as.data.frame(cbind(mesh.dom$loc[,1],
mesh.dom$loc[,2],
mesh.dom$loc[,3]))
dd = as.data.frame(
inla.mesh.map(dd,
projection = "longlat",
inverse = FALSE))
names(dd) = c("Long", "Lat")
dd$Set = "Node"
dd$Risk_week20 = 0
dd$Risk_week52 = 0
PD.set = CommChick.ll@data %>%
mutate(Set = "OBS",
Long = POINT_X,
Lat = POINT_Y) %>%
select(Long, Lat, Risk_week20, Risk_week52, Set)
Spatial.pnts = rbind(PD.set, dd)
locs = cbind(Spatial.pnts$Long, Spatial.pnts$Lat)
locs = inla.mesh.map(locs,
projection = "longlat",
inverse = TRUE)
A.sp = inla.spde.make.A(mesh.dom,
loc = locs)
spde0 = inla.spde2.pcmatern(mesh.dom, alpha = 2,
prior.range=c(1, 0.5),
prior.sigma=c(1, 0.5),
constr = TRUE)
field.sp = inla.spde.make.index("field.sp",
spde0$n.spde)

Logit Transform
For ease of interpretation, values are scaled to the range 0 to 1. Because this is a restricted interval, a logit transformation is applied to normalize the data before modeling.
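For reference, the transform used below and its inverse are the standard logit and logistic functions (a small reminder sketch; only the forward transform is applied in the code that follows):
# Logit transform (applied below) and its inverse, for reference.
logit     = function(p) log(p/(1 - p))
inv.logit = function(x) 1/(1 + exp(-x))   # equivalent to plogis(x)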
MyTransform = c(Spatial.pnts$Risk_week20, Spatial.pnts$Risk_week52)
MyTransform = Scale(MyTransform)
MyTransform[MyTransform == 0] = 0.00001
MyTransform[MyTransform == 1] = 0.99999
#range(MyTransform)
t.MyTransform = log(MyTransform/(1-MyTransform))
#range(t.MyTransform)
#plot(density(t.MyTransform))
Spatial.pnts$t.Risk20 = t.MyTransform[1:dim(Spatial.pnts)[1]]
Spatial.pnts$t.Risk52 = t.MyTransform[(dim(Spatial.pnts)[1]+1):length(t.MyTransform)]

Organize Data for Model:
FE.lst = list(c(field.sp,
list(intercept1 = 1)),
list(NN = Spatial.pnts[,"Long"]))
Sp20.stk = inla.stack(data = list(Y = Spatial.pnts$t.Risk20), #Week 20 Risk
A = list(A.sp, 1),
effects = FE.lst,
tag = "sp.20")
Sp52.stk = inla.stack(data = list(Y = Spatial.pnts$t.Risk52), #Week 52 Risk
A = list(A.sp, 1),
effects = FE.lst,
tag = "sp.52")
Frm.J = Y ~ -1 + intercept1 +
f(field.sp,
model = spde0)
SP.20 = inla(Frm.J,
data = inla.stack.data(Sp20.stk),
family = "gaussian",
verbose = FALSE,
control.fixed = list(prec = 0.01,
prec.intercept = 0.001),
control.predictor = list(
A = inla.stack.A(Sp20.stk),
compute = TRUE,
link = 1),
#control.mode = list(restart = TRUE, theta = theta.jnc),
control.inla = list(strategy = "gaussian",
int.strategy = "eb"),
control.results = list(return.marginals.random = TRUE,
return.marginals.predictor = TRUE),
control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE))
SP.52 = inla(Frm.J,
data = inla.stack.data(Sp52.stk),
family = "gaussian",
verbose = FALSE,
control.fixed = list(prec = 0.01,
prec.intercept = 0.001),
control.predictor = list(
A = inla.stack.A(Sp52.stk),
compute = TRUE,
link = 1),
#control.mode = list(restart = TRUE, theta = theta.jnc),
control.inla = list(strategy = "gaussian",
int.strategy = "eb"),
control.results = list(return.marginals.random = TRUE,
return.marginals.predictor = TRUE),
control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE))

Create point grid for mapping results
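Before building the prediction grid, the two fitted models can be sanity-checked with the information criteria requested via control.compute above (a quick, optional inspection):
# Optional check: compare information criteria for the two weekly models.
rbind(Week_20 = c(DIC = SP.20$dic$dic, WAIC = SP.20$waic$waic),
      Week_52 = c(DIC = SP.52$dic$dic, WAIC = SP.52$waic$waic))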
Domain.r.ll = projectRaster(Domain.r, crs=CRS(proj4string(CommChick.ll)))
Domain.r.ll = disaggregate(Domain.r.ll, fact= 10)
Grd.pnt = rasterToPoints(Domain.r.ll, sp = TRUE)
Grd.pnt@data = Grd.pnt@data %>%
mutate(Long = Grd.pnt@coords[,1],
Lat = Grd.pnt@coords[,2]) %>%
select(-layer)

Map Results:
Map.pnts = Grd.pnt
ModResult = SP.20
ModResult2 = SP.52
ModMesh = mesh.dom
pLoc = cbind(Map.pnts$Long, Map.pnts$Lat)
pLoc = inla.mesh.map(pLoc,
projection = "longlat",
inverse = TRUE)
Ap = inla.spde.make.A(ModMesh, loc=pLoc)
Map.pnts$sRisk.Wk20 = (drop(Ap %*% ModResult$summary.random$field.sp$mean))
Map.pnts$sRisk.Wk52 = (drop(Ap %*% ModResult2$summary.random$field.sp$mean))
sRisk_Wk20.r = rasterize(Map.pnts,
Domain.r.ll,
"sRisk.Wk20",
background = NA)
sRisk_Wk52.r = rasterize(Map.pnts,
Domain.r.ll,
"sRisk.Wk52",
background = NA)

View Risk with Spatial Smoothing
Risk.stk.sp = stack(sRisk_Wk20.r, sRisk_Wk52.r)
values(Risk.stk.sp) = Scale(values(Risk.stk.sp))
States.ll = spTransform(States, LL84)
breaks = seq(0, 1, 0.01)
#mCols = mCols = rev(viridis(1000)) #brewer.pal(9, "YlOrRd")
#mCols = c(rep("white",2), mCols[3:9])
mCols = rev(inferno(1000))
mCols = c(rep("white",49), mCols[50:1000])
cr = colorRampPalette(mCols)(length(breaks)-1)
cr = colorRampPalette(cr,
bias = 0.5, space = "rgb")
M0 = levelplot(Risk.stk.sp,
margin = FALSE,
layout=c(2, 1),
xlab = NULL,
ylab = NULL,
names.attr = c("Week 20", "Week 52"),
main=list("Spatially Smoothed Risk", cex = 2, hjust = 0.5, col="gray40"),
maxpixels = 1e5,
col.regions = cr, at = breaks,
colorkey = list(labels=list(at=c(c(seq(0, 1, 0.2))),
labels=c(paste(seq(0, 1, 0.2))), cex=1.5),
labels=list(cex=12),
space = "bottom"),
par.strip.text = list(fontface='bold', cex=2),
par.settings = list(axis.line = list(col = "black"),
strip.background = list(col = 'transparent'),
strip.border = list(col = 'transparent')),
scales = list(cex = 1.5)) +
latticeExtra::layer(sp.polygons(States.ll, col = "gray40", lwd = 1.5))
M0