library(magrittr)
library(ggplot2)
library(sf)         # spatial I/O and plotting (replaces the retired sp/rgdal stack)
library(dplyr)
library(knitr)      # kable()
library(kableExtra) # kable_styling()
### Prepare data
load("cases.RData")
missing <- data.frame()
missing_LA <- list()
for (i in 2003:2019) {
o <- cases[cases$Year == i, ]$Observed %>%
is.na() %>%
which() %>%
length()
e <- cases[cases$Year == i, ]$Expected %>%
is.na() %>%
which() %>%
length()
s <- cases[cases$Year == i, ]$SAR %>%
is.na() %>%
which() %>%
length()
p <- cases[cases$Year == i, ]$Population %>%
is.na() %>%
which() %>%
length()
row <- data.frame(Year = i, Observed = o, Expected = e, SAR = s, Population = p)
missing <- rbind(missing, row)
which <- cases[cases$Year == i, ]$SAR %>%
is.na() %>%
which()
missing_LA <- c(missing_LA, list(cases[cases$Year == i, ][which, ]$Level.description))
}
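The same table can be produced more compactly with dplyr (a sketch, assuming the column names used above):
cases %>%
  group_by(Year) %>%
  summarise(across(c(Observed, Expected, SAR, Population), ~ sum(is.na(.x))))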
kable(missing, caption = "Missing Observations") %>% kable_styling(full_width = TRUE)
| Year | Observed | Expected | SAR | Population |
|---|---|---|---|---|
| 2003 | 120 | 120 | 120 | 0 |
| 2004 | 99 | 99 | 99 | 0 |
| 2005 | 114 | 114 | 114 | 0 |
| 2006 | 127 | 127 | 127 | 0 |
| 2007 | 108 | 108 | 108 | 0 |
| 2008 | 77 | 77 | 77 | 0 |
| 2009 | 105 | 105 | 105 | 0 |
| 2010 | 129 | 129 | 129 | 0 |
| 2011 | 115 | 115 | 115 | 0 |
| 2012 | 73 | 73 | 73 | 0 |
| 2013 | 0 | 0 | 0 | 0 |
| 2014 | 0 | 0 | 0 | 0 |
| 2015 | 0 | 0 | 0 | 0 |
| 2016 | 0 | 0 | 0 | 0 |
| 2017 | 0 | 0 | 0 | 0 |
| 2018 | 0 | 0 | 0 | 0 |
| 2019 | 0 | 0 | 0 | 0 |
# Years before 2013 have substantial missingness, so restrict to 2013-2019
cases <- cases[cases$Year >= 2013, ]
### Observed/expected table (for knitr)
kable(
cases %>%
group_by(Year) %>%
summarise(`Observed` = format(sum(Observed), big.mark=","), `Expected` = format(sum(Expected), big.mark=",")),
booktabs = TRUE,
caption = "Aggregated Admissions"
) %>% kable_styling(full_width = TRUE)
| Year | Observed | Expected |
|---|---|---|
| 2013 | 42,399 | 44,258.5 |
| 2014 | 45,018 | 43,732.9 |
| 2015 | 49,738 | 43,690.5 |
| 2016 | 52,879 | 43,984.1 |
| 2017 | 50,710 | 43,316.6 |
| 2018 | 56,345 | 42,555 |
| 2019 | 56,505 | 41,570 |
temp <- cases %>%
  group_by(Year) %>%
  summarise(Observed = sum(Observed), Expected = sum(Expected))
# Map the series names inside aes() so scale_color_manual() produces a legend
ggplot(temp, aes(x = Year)) +
  geom_line(aes(y = Observed, color = "Observed")) +
  geom_line(aes(y = Expected, color = "Expected")) +
  scale_color_manual(name = "Admissions", values = c("Observed" = "darkred", "Expected" = "steelblue")) +
  labs(y = "Admissions")
load("pollution.RData")
P25<- P25[P25$Year >= 2013,]
mu = mean(NO2$NO2)
sd = sd(NO2$NO2)
hist(P25$PM25,
main="Concentration of PM2.5",
xlab="µg/m3",
col="yellow")
abline(v=mean(P25$PM25), col="blue")
hist(NO2$NO2,
main="Concentration of NO2",
xlab="µg/m3",
col="yellow")
abline(v=mu, col="red")
Maps of SAR by LTLA by year
# Neighbourhood structure for the spatial models (the colour palettes are created further down)
library(spdep)
# Read shapefile
shapefile_path <- "../Boundaries (2019)/England/Local_Authority_Districts_December_2019_England.shp"
map <- st_read(shapefile_path)
## Reading layer `Local_Authority_Districts_December_2019_England' from data source `/Users/cam/Library/CloudStorage/OneDrive-ImperialCollegeLondon/LRTI project/Boundaries (2019)/England/Local_Authority_Districts_December_2019_England.shp'
## using driver `ESRI Shapefile'
## Simple feature collection with 317 features and 11 fields
## Geometry type: MULTIPOLYGON
## Dimension: XY
## Bounding box: xmin: -714517.9 ymin: 6422874 xmax: 196334.8 ymax: 7520900
## Projected CRS: WGS 84 / Pseudo-Mercator
# Drop the Isles of Scilly, which has no land neighbours
map <- map[map$lad19nm != "Isles of Scilly", ]
# Define the neighbors and create the weights list
map_nb <- poly2nb(map)
summary(map_nb)
## Neighbour list object:
## Number of regions: 316
## Number of nonzero links: 1610
## Percentage nonzero weights: 1.612322
## Average number of links: 5.094937
## 1 region with no links:
## 44
## Link number distribution:
##
## 0 1 2 3 4 5 6 7 8 9 10 11
## 1 8 25 39 54 58 46 49 21 10 3 2
## 8 least connected regions:
## 10 26 61 67 88 89 115 263 with 1 link
## 2 most connected regions:
## 51 227 with 11 links
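The summary reports one region left with no links (index 44). A quick sketch to identify it, using spdep::card (the neighbour list preserves the row order of map):
# Region with no neighbours: card() gives each region's neighbour count
no_links <- which(card(map_nb) == 0)
map$lad19nm[no_links]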
# Convert to INLA format
nb2INLA("map.graph", map_nb)
map.adj <- paste(getwd(), "/map.graph", sep = "")
Produce a spatial map of the SARs by year
library(RColorBrewer)
SARcolors <- c("#D0F0C0", brewer.pal(4, "Reds"))
# Create bins for the fill colour; include.lowest keeps the minimum SAR in the first bin
cases$SAR.cut <- cut(
  cases$SAR,
  breaks = c(min(cases$SAR, na.rm = TRUE), 1, 2.5, 5, max(cases$SAR, na.rm = TRUE)),
  include.lowest = TRUE
)
# Prep data (TODO: decide whether PM2.5 needs to be added here - depends on whether map_i is reused later)
for (i in 2013:2019) {
  d <- cases[cases$Year == i, ]
  map_i <- left_join(map, d, by = join_by(lad19cd == Level))
  p <- ggplot() +
    geom_sf(data = map_i, aes(fill = SAR.cut)) +
    theme_bw() +
    scale_fill_manual(values = SARcolors) +
    guides(fill = guide_legend(title = "SAR")) +
    labs(title = paste0("SAR in ", i))
  print(p)
}
# Colour palette and bins for PM2.5
P25colors <- brewer.pal(7, "Blues")
P25$PM25.cut <- cut(
  P25$PM25,
  breaks = c(min(P25$PM25, na.rm = TRUE), 7.5, 10, 12.5, 15, 17.5, max(P25$PM25, na.rm = TRUE)),
  include.lowest = TRUE
)
for (i in 2013:2019) {
  d <- P25[P25$Year == i, ]
  map_i <- left_join(map, d, by = join_by(lad19cd, lad19nm, bng_e, bng_n, long, lat))
  p <- ggplot() +
    geom_sf(data = map_i, aes(fill = PM25.cut)) +
    theme_bw() +
    scale_fill_manual(values = P25colors) +
    guides(fill = guide_legend(title = "PM2.5")) +
    labs(title = paste0("PM2.5 in ", i))
  print(p)
}
# Colour palette and bins for NO2
NO2colors <- brewer.pal(7, "Purples")
NO2$NO2.cut <- cut(
  NO2$NO2,
  breaks = c(min(NO2$NO2, na.rm = TRUE), 13, 24, 34, 44, 55, max(NO2$NO2, na.rm = TRUE)),
  include.lowest = TRUE
)
for (i in 2013:2019) {
  d <- NO2[NO2$Year == i, ]
  map_i <- left_join(map, d, by = join_by(lad19cd, lad19nm, bng_e, bng_n, long, lat))
  p <- ggplot() +
    geom_sf(data = map_i, aes(fill = NO2.cut)) +
    theme_bw() +
    scale_fill_manual(values = NO2colors) +
    guides(fill = guide_legend(title = "NO2")) +
    labs(title = paste0("NO2 in ", i))
  print(p)
}
London close-up
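A sketch for this placeholder, assuming London boroughs are identified by ONS codes beginning "E09" and reusing map_i from the final (2019) iteration of the NO2 loop above:
# Filter the joined 2019 NO2 map to the 33 London boroughs (codes E09xxxxxx)
london <- map_i[substr(map_i$lad19cd, 1, 3) == "E09", ]
ggplot() +
  geom_sf(data = london, aes(fill = NO2.cut)) +
  theme_bw() +
  scale_fill_manual(values = NO2colors) +
  guides(fill = guide_legend(title = "NO2")) +
  labs(title = "NO2 in London, 2019")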
Now we build the models
# Aggregate observed and expected cases by LA (independent of year)
cases_agg <- cases %>%
group_by(Level) %>%
summarise(Observed = sum(Observed), Expected = sum(Expected), Population = sum(Population)) %>%
mutate(SAR = Observed / Expected) %>%
mutate(log.Population = log(Population)) %>%
dplyr::rename(O = Observed, E = Expected)
# Poisson models need integer counts: the City of London (E09000001) and
# Hackney (E09000012) admissions are apportioned and non-integer, so round
# one down and the other up to preserve their combined total
cases_agg[cases_agg$Level == "E09000001", ]$O <- cases_agg[cases_agg$Level == "E09000001", ]$O %>% floor()
cases_agg[cases_agg$Level == "E09000012", ]$O <- cases_agg[cases_agg$Level == "E09000012", ]$O %>% ceiling()
# Average pollutant concentrations over the study period, per LTLA
# (P25 and NO2 were already restricted to 2013-2019 above)
PolP25 <- P25 %>% group_by(lad19cd) %>% summarise(PM2.5 = mean(PM25))
PolNO2 <- NO2 %>% group_by(lad19cd) %>% summarise(NO2 = mean(NO2))
# Merge: 'data' is cases_agg combined with the PM2.5 and NO2 averages
data <- merge(cases_agg, PolP25, by.x = "Level", by.y = "lad19cd")
data <- merge(data, PolNO2, by.x = "Level", by.y = "lad19cd")
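A quick sanity check (assumptions worth stating: after dropping the Isles of Scilly there are 316 districts, and every LTLA in cases_agg should have matched a pollution average):
# Sanity check: one row per district and no unmatched pollution values
stopifnot(nrow(data) == 316, !anyNA(data$PM2.5), !anyNA(data$NO2))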
We start with the fixed environmental effects.
# Correlation between the two pollutants across LTLA-year observations
temp <- left_join(P25, NO2, by = c("lad19nm", "Year")) %>% select(lad19nm, Year, PM25, NO2)
corr_matrix <- cor(temp[, 3:4])
print(corr_matrix)
## PM25 NO2
## PM25 1.0000000 0.7452461
## NO2 0.7452461 1.0000000
library(INLA)
# Note: these first fixed-effect fits use raw counts with no expected-count
# offset, so the (huge) WAICs below are meaningful only relative to each other
formula <- O ~ PM2.5
fixed.model <- inla(formula, family = "poisson", data = data, control.compute = list(waic = TRUE))
fixed.model$summary.fixed
## mean sd 0.025quant 0.5quant 0.975quant
## (Intercept) 7.3272231 0.010352538 7.30693254 7.3272231 7.34751375
## PM2.5 -0.0319657 0.001067797 -0.03405855 -0.0319657 -0.02987286
## mode kld
## (Intercept) 7.3272231 2.431918e-11
## PM2.5 -0.0319657 6.114588e-14
summary(fixed.model)
##
## Call:
## c("inla.core(formula = formula, family = family, contrasts = contrasts,
## ", " data = data, quantiles = quantiles, E = E, offset = offset, ", "
## scale = scale, weights = weights, Ntrials = Ntrials, strata = strata,
## ", " lp.scale = lp.scale, link.covariates = link.covariates, verbose =
## verbose, ", " lincomb = lincomb, selection = selection, control.compute
## = control.compute, ", " control.predictor = control.predictor,
## control.family = control.family, ", " control.inla = control.inla,
## control.fixed = control.fixed, ", " control.mode = control.mode,
## control.expert = control.expert, ", " control.hazard = control.hazard,
## control.lincomb = control.lincomb, ", " control.update =
## control.update, control.lp.scale = control.lp.scale, ", "
## control.pardiso = control.pardiso, only.hyperparam = only.hyperparam,
## ", " inla.call = inla.call, inla.arg = inla.arg, num.threads =
## num.threads, ", " keep = keep, working.directory = working.directory,
## silent = silent, ", " inla.mode = inla.mode, safe = FALSE, debug =
## debug, .parent.frame = .parent.frame)" )
## Time used:
## Pre = 0.663, Running = 0.167, Post = 0.00932, Total = 0.839
## Fixed effects:
## mean sd 0.025quant 0.5quant 0.975quant mode kld
## (Intercept) 7.327 0.010 7.307 7.327 7.348 7.327 0
## PM2.5 -0.032 0.001 -0.034 -0.032 -0.030 -0.032 0
##
## Watanabe-Akaike information criterion (WAIC) ...: 4476985.65
## Effective number of parameters .................: 2228372.47
##
## Marginal log-Likelihood: -88977.16
## is computed
## Posterior summaries for the linear predictor and the fitted values are computed
## (Posterior marginals needs also 'control.compute=list(return.marginals.predictor=TRUE)')
formula <- O ~ NO2
fixed.model <- inla(formula, family="poisson", data=data, control.compute = list(waic = TRUE))
fixed.model$summary.fixed
## mean sd 0.025quant 0.5quant 0.975quant mode
## (Intercept) 6.75614485 0.0038297071 6.74863876 6.75614485 6.7636509 6.75614485
## NO2 0.01878669 0.0002373536 0.01832149 0.01878669 0.0192519 0.01878669
## kld
## (Intercept) 0
## NO2 0
summary(fixed.model)
##
## Call:
## c("inla.core(formula = formula, family = family, contrasts = contrasts,
## ", " data = data, quantiles = quantiles, E = E, offset = offset, ", "
## scale = scale, weights = weights, Ntrials = Ntrials, strata = strata,
## ", " lp.scale = lp.scale, link.covariates = link.covariates, verbose =
## verbose, ", " lincomb = lincomb, selection = selection, control.compute
## = control.compute, ", " control.predictor = control.predictor,
## control.family = control.family, ", " control.inla = control.inla,
## control.fixed = control.fixed, ", " control.mode = control.mode,
## control.expert = control.expert, ", " control.hazard = control.hazard,
## control.lincomb = control.lincomb, ", " control.update =
## control.update, control.lp.scale = control.lp.scale, ", "
## control.pardiso = control.pardiso, only.hyperparam = only.hyperparam,
## ", " inla.call = inla.call, inla.arg = inla.arg, num.threads =
## num.threads, ", " keep = keep, working.directory = working.directory,
## silent = silent, ", " inla.mode = inla.mode, safe = FALSE, debug =
## debug, .parent.frame = .parent.frame)" )
## Time used:
## Pre = 0.554, Running = 0.167, Post = 0.00432, Total = 0.725
## Fixed effects:
## mean sd 0.025quant 0.5quant 0.975quant mode kld
## (Intercept) 6.756 0.004 6.749 6.756 6.764 6.756 0
## NO2 0.019 0.000 0.018 0.019 0.019 0.019 0
##
## Watanabe-Akaike information criterion (WAIC) ...: 6329034.51
## Effective number of parameters .................: 3154754.22
##
## Marginal log-Likelihood: -86503.09
## is computed
## Posterior summaries for the linear predictor and the fitted values are computed
## (Posterior marginals needs also 'control.compute=list(return.marginals.predictor=TRUE)')
Both pollutant coefficients have narrow credible intervals (although of opposite sign). The WAIC for the PM2.5 model is much lower (4,476,985.65 vs 6,329,034.51), and the two pollutants are too strongly correlated (r ≈ 0.75) to include together, so we select PM2.5 to represent outdoor air pollution.
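For reference, a sketch of the same comparison on the SAR scale, supplying the expected counts through INLA's E argument (an alternative not run above; it would change the reported WAICs):
# Refit both fixed-effect models with E as the expected-count offset
fixed.PM  <- inla(O ~ PM2.5, family = "poisson", data = data, E = E,
                  control.compute = list(waic = TRUE))
fixed.NO2 <- inla(O ~ NO2, family = "poisson", data = data, E = E,
                  control.compute = list(waic = TRUE))
# Compare the two WAICs directly
c(PM2.5 = fixed.PM$waic$waic, NO2 = fixed.NO2$waic$waic)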
Produce a spatial map of the aggregated SARs (non-smoothed RRs)
# Create buckets for the aggregated SAR; include.lowest keeps the minimum in the first bin
cases_agg$SAR.cut <- cut(
  cases_agg$SAR,
  breaks = c(min(cases_agg$SAR, na.rm = TRUE), 0.6, 1, 1.4, max(cases_agg$SAR, na.rm = TRUE)),
  include.lowest = TRUE
)
# Produce spatial map of aggregated SAR
# map_agg is map joined with cases_agg
map_agg <- left_join(map, cases_agg, by = join_by(lad19cd == Level))
# Plot SAR using ggplot
ggplot() +
  geom_sf(data = map_agg, aes(fill = SAR.cut), col = NA) +
  theme_bw() +
  scale_fill_manual(values = SARcolors) +
  guides(fill = guide_legend(title = "SAR")) +
  labs(title = "SAR")
Fit a hierarchical Poisson log-linear model in INLA. We use a BYM2 prior for the spatial random effect, with the PC priors (Simpson et al. 2017) seen in Session 2.2; this specifies the combination of an intrinsic conditional autoregressive (ICAR) structure and an unstructured random effect, as described before.
The ICAR model alone makes a strong spatial assumption: it cannot take a limiting form that allows purely non-spatial variability. Besag, York and Mollié (BYM) therefore recommended combining the ICAR prior with a standard normal prior, allowing for both spatially unstructured latent covariates modelled as iid (global smoothing) and spatially correlated latent covariates modelled as ICAR (local smoothing).
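In equation form, this is the BYM2 reparameterisation (Riebler et al. 2016), where $O_i$ and $E_i$ are the observed and expected counts in area $i$:
$$
O_i \sim \text{Poisson}(E_i \rho_i), \qquad \log \rho_i = \beta_0 + b_i, \qquad b_i = \frac{1}{\sqrt{\tau_b}}\left(\sqrt{1-\phi}\, v_i + \sqrt{\phi}\, u_i^*\right)
$$
where $v_i \sim N(0,1)$ is the unstructured component, $u_i^*$ the scaled ICAR component, $\tau_b$ the overall precision, and $\phi \in [0,1]$ the mixing parameter giving the proportion of residual variance that is spatially structured.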
LA <- seq(1, 316)
formula_BYM2 <- O ~ f(LA, model = "bym2", graph = map.adj, hyper = list(
  # PC prior on the precision: P(sd > 0.5/0.31) = 0.01
  prec = list(
    prior = "pc.prec",
    param = c(0.5 / 0.31, 0.01)
  ),
  # PC prior on the mixing parameter: P(phi < 0.5) = 2/3
  phi = list(
    prior = "pc",
    param = c(0.5, 2 / 3)
  )
))
sBYM.model <- inla(formula = formula_BYM2, family = "poisson", data = data, E = E, control.compute = list(waic = TRUE))
# Obtain the posterior summary statistics
summary(sBYM.model)
##
## Call:
## c("inla.core(formula = formula, family = family, contrasts = contrasts,
## ", " data = data, quantiles = quantiles, E = E, offset = offset, ", "
## scale = scale, weights = weights, Ntrials = Ntrials, strata = strata,
## ", " lp.scale = lp.scale, link.covariates = link.covariates, verbose =
## verbose, ", " lincomb = lincomb, selection = selection, control.compute
## = control.compute, ", " control.predictor = control.predictor,
## control.family = control.family, ", " control.inla = control.inla,
## control.fixed = control.fixed, ", " control.mode = control.mode,
## control.expert = control.expert, ", " control.hazard = control.hazard,
## control.lincomb = control.lincomb, ", " control.update =
## control.update, control.lp.scale = control.lp.scale, ", "
## control.pardiso = control.pardiso, only.hyperparam = only.hyperparam,
## ", " inla.call = inla.call, inla.arg = inla.arg, num.threads =
## num.threads, ", " keep = keep, working.directory = working.directory,
## silent = silent, ", " inla.mode = inla.mode, safe = FALSE, debug =
## debug, .parent.frame = .parent.frame)" )
## Time used:
## Pre = 4.56, Running = 0.239, Post = 0.0139, Total = 4.81
## Fixed effects:
## mean sd 0.025quant 0.5quant 0.975quant mode kld
## (Intercept) 0.131 0.004 0.123 0.131 0.14 0.131 0
##
## Random effects:
## Name Model
## LA BYM2 model
##
## Model hyperparameters:
## mean sd 0.025quant 0.5quant 0.975quant mode
## Precision for LA 9.801 0.952 7.999 9.776 11.742 8.95
## Phi for LA 0.957 0.036 0.862 0.967 0.995 0.96
##
## Watanabe-Akaike information criterion (WAIC) ...: 3262.58
## Effective number of parameters .................: 158.88
##
## Marginal log-Likelihood: -1968.79
## is computed
## Posterior summaries for the linear predictor and the fitted values are computed
## (Posterior marginals needs also 'control.compute=list(return.marginals.predictor=TRUE)')
Check for spatial autocorrelation
# Plot the model residuals to reveal patterns of autocorrelation, e.g. with Moran's I
# We assume spatial stationarity, i.e. that spatial autocorrelation and the effects
# of environmental correlates are constant across the region (Dormann 2007)
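A minimal sketch of such a check, assuming (i) the rows of data are in the same lad19cd order as map (worth verifying before use) and (ii) INLA's fitted values are on the relative-risk scale when E is supplied:
# Moran's I test for residual spatial autocorrelation; zero.policy handles
# the one district left with no neighbours
lw <- nb2listw(map_nb, style = "W", zero.policy = TRUE)
res <- data$O - data$E * sBYM.model$summary.fitted.values$mean
moran.test(res, lw, zero.policy = TRUE)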
We can now calculate the probabilities that the smoothed RR estimates are greater than a given threshold value. These probabilities, called exceedance probabilities, are useful for assessing unusual elevations of disease risk (Richardson, Thomson, Best, and Elliott, 2004).
Obtain the posterior summary statistics of the parameters of interest: the posterior mean, and the posterior probability that the residual RR is above 1 (equivalently, that the log-residual is above 0):
RR_sBYM <- c()
# The random effect is on the log scale, so transform to the RR scale with exp();
# the first 316 elements of marginals.random$LA hold the combined effect (u + v)
for (i in 1:316) {
  RR_sBYM[i] <- inla.emarginal(
    function(x) exp(x),
    sBYM.model$marginals.random$LA[[i]]
  )
}
# Posterior probabilities that the residual RR > 1, i.e. that the log-residual > 0
RR_sBYM_marg <- sBYM.model$marginals.random$LA[1:316]
PP_sBYM <- lapply(RR_sBYM_marg, function(x) {
  1 - inla.pmarginal(0, x)
})
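The same machinery generalises to any threshold c by working on the log scale (a sketch; the threshold of 1.5 is a hypothetical choice):
# Exceedance probability P(RR > c) for an arbitrary threshold c
threshold <- 1.5
PP_c <- sapply(RR_sBYM_marg, function(x) 1 - inla.pmarginal(log(threshold), x))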
Obtain the posterior estimates from the spatial model to be plotted: (i) the area-level posterior mean of the residual RRs, and (ii) the posterior probability (PP) that the residual RR > 1. The residual RR is the risk in an area relative to the region average, after adjusting for the overall risk.
resRR_PP <- data.frame(
  resRR = RR_sBYM,
  PP = unlist(PP_sBYM),
  Level = cases_agg$Level
)
Produce a map of (i) the posterior mean of the residual RRs and (ii) the posterior probabilities that the residual RRs are > 1. The spatially smoothed residual RR under BYM2 is exp(u + v).
resRR_PP$resRRcat <- cut(resRR_PP$resRR, breaks = c(
  min(resRR_PP$resRR),
  0.4, 0.6, 0.8, 1, 1.2, 1.4, 1.6,
  max(resRR_PP$resRR)
), include.lowest = TRUE)
# Breakpoints for the posterior probabilities
resRR_PP$PPcat <- cut(resRR_PP$PP, c(0, 0.2, 0.8, 1.00), include.lowest = TRUE)
map_RR_PP <- left_join(map, resRR_PP, by = c("lad19cd" = "Level"))
# Produce the maps of the posterior mean of the residual RRs and the posterior probabilities PP using ggplot2 package.
library(viridis)
p1 <- ggplot() +
  geom_sf(data = map_RR_PP, aes(fill = resRRcat)) +
theme_bw() +
scale_fill_brewer(palette = "PuOr") +
guides(fill = guide_legend(title = "RR")) +
ggtitle("Posterior mean of the residual RR Spatial model") +
theme(
text = element_text(size = 15),
axis.text.x = element_blank(),
axis.text.y = element_blank(), plot.title = element_text(size = 12, face = "bold")
)
p2 <- ggplot() +
  geom_sf(data = map_RR_PP, aes(fill = PPcat)) +
theme_bw() +
scale_fill_viridis(
option = "plasma", name = "PP",
    discrete = TRUE,
direction = -1,
guide = guide_legend(
title.position = "top",
      reverse = TRUE
)
) +
ggtitle("PP Spatial model (Exceedance probabilities)") +
theme(
text = element_text(size = 15),
axis.text.x = element_blank(),
axis.text.y = element_blank(), plot.title = element_text(size = 12, face = "bold")
)
print(p1)
print(p2)
sBYM.model$summary.hyperpar
## mean sd 0.025quant 0.5quant 0.975quant mode
## Precision for LA 9.8009242 0.95198671 7.9990920 9.7756295 11.7421155 8.9461329
## Phi for LA 0.9571348 0.03616949 0.8619926 0.9673646 0.9952879 0.9595059
The posterior mean of Phi is close to 1, indicating that almost all of the residual variability is spatially structured. Now combine the fixed effect with the spatial random effects.
LA <- seq(1, 316)
# As before, but with PM2.5 as a fixed effect; note the PC prior parameters
# here (0.6) differ slightly from those of the spatial-only model (0.5)
formula <- O ~ PM2.5 + f(LA, model = "bym2", graph = map.adj, hyper = list(
  prec = list(
    prior = "pc.prec",
    param = c(0.6 / 0.31, 0.01)
  ),
  phi = list(
    prior = "pc",
    param = c(0.6, 2 / 3)
  )
))
fixed.sBYM.model <- inla(formula = formula, family = "poisson", data = data, E = E, control.compute = list(waic = TRUE))
summary(fixed.sBYM.model)
##
## Call:
## c("inla.core(formula = formula, family = family, contrasts = contrasts,
## ", " data = data, quantiles = quantiles, E = E, offset = offset, ", "
## scale = scale, weights = weights, Ntrials = Ntrials, strata = strata,
## ", " lp.scale = lp.scale, link.covariates = link.covariates, verbose =
## verbose, ", " lincomb = lincomb, selection = selection, control.compute
## = control.compute, ", " control.predictor = control.predictor,
## control.family = control.family, ", " control.inla = control.inla,
## control.fixed = control.fixed, ", " control.mode = control.mode,
## control.expert = control.expert, ", " control.hazard = control.hazard,
## control.lincomb = control.lincomb, ", " control.update =
## control.update, control.lp.scale = control.lp.scale, ", "
## control.pardiso = control.pardiso, only.hyperparam = only.hyperparam,
## ", " inla.call = inla.call, inla.arg = inla.arg, num.threads =
## num.threads, ", " keep = keep, working.directory = working.directory,
## silent = silent, ", " inla.mode = inla.mode, safe = FALSE, debug =
## debug, .parent.frame = .parent.frame)" )
## Time used:
## Pre = 4.76, Running = 0.277, Post = 0.0149, Total = 5.05
## Fixed effects:
## mean sd 0.025quant 0.5quant 0.975quant mode kld
## (Intercept) 0.528 0.221 0.093 0.529 0.960 0.529 0
## PM2.5 -0.041 0.023 -0.086 -0.041 0.004 -0.041 0
##
## Random effects:
## Name Model
## LA BYM2 model
##
## Model hyperparameters:
## mean sd 0.025quant 0.5quant 0.975quant mode
## Precision for LA 10.050 1.056 8.025 10.030 12.176 8.990
## Phi for LA 0.942 0.044 0.829 0.954 0.993 0.943
##
## Watanabe-Akaike information criterion (WAIC) ...: 3263.55
## Effective number of parameters .................: 159.30
##
## Marginal log-Likelihood: -1974.32
## is computed
## Posterior summaries for the linear predictor and the fitted values are computed
## (Posterior marginals needs also 'control.compute=list(return.marginals.predictor=TRUE)')
{r spatial-temporal}
# map_agg <- left_join(map, cases, by = join_by(lad19cd == Level))
# map_agg <- map_agg %>% dplyr::rename(O = Observed, E = Expected)
#
# # Create the ID for year (time)
# map_agg$ID.time <- map_agg$Year - 2012
#
# # Create the ID for space
# map_agg$ID.space <- rep(seq(1, 316), each = 7)
#
# formula_ST_noint <- O ~ f(ID.space, model = "bym2", graph = map.adj,
#                           hyper = list(
#                             prec = list(prior = "pc.prec", param = c(0.5 / 0.31, 0.01)),
#                             phi = list(prior = "pc", param = c(0.5, 2 / 3)))) +
#   f(ID.time, model = "rw1",
#     hyper = list(prec = list(prior = "pc.prec", param = c(0.5 / 0.31, 0.01))))
#
# stBYM.model <- inla(formula = formula_ST_noint, family = "poisson",
#                     data = map_agg, E = E,
#                     control.compute = list(waic = TRUE))
#
# # Spatial relative risks
# RR_stBYM <- c()
# for (i in 1:316) {
#   RR_stBYM[i] <- inla.emarginal(function(x) exp(x),
#                                 stBYM.model$marginals.random$ID.space[[i]])
# }
#
# # Posterior probabilities (for spatial RR)
# RR_stBYM_marg <- stBYM.model$marginals.random$ID.space[1:316]
# PP_stBYM <- lapply(RR_stBYM_marg, function(x) {1 - inla.pmarginal(0, x)})
#
# # Temporal relative risks and 95% CI
# RR_stRW_RR <- c()
# RR_stRW_lo <- c()
# RR_stRW_hi <- c()
# for (i in 1:7) {
#   # Posterior mean
#   RR_stRW_RR[i] <- inla.emarginal(function(x) exp(x),
#                                   stBYM.model$marginals.random$ID.time[[i]])
#   # 2.5% quantile
#   RR_stRW_lo[i] <- inla.qmarginal(0.025, inla.tmarginal(function(x) exp(x), stBYM.model$marginals.random$ID.time[[i]]))
#   # 97.5% quantile
#   RR_stRW_hi[i] <- inla.qmarginal(0.975, inla.tmarginal(function(x) exp(x), stBYM.model$marginals.random$ID.time[[i]]))
# }
#
# RR_stRW <- data.frame(RR = RR_stRW_RR, low = RR_stRW_lo, high = RR_stRW_hi)
#
# library(ggplot2)
# Temp1 <- ggplot(RR_stRW, aes(seq(2013, 2019), RR)) + geom_line() +
#   ggtitle("ST model No Int") +
#   geom_ribbon(aes(ymin = low, ymax = high), alpha = 0.2) + labs(x = "year")
#
# print(Temp1)

{r time}
# resRR_PP_st <- data.frame(resRR = RR_stBYM,
#                           PP = unlist(PP_stBYM),
#                           lad19cd = map_agg[, 2])
# # breakpoints
# resRR_PP_st$resRRcat <- cut(resRR_PP_st$resRR,
#                             breaks = c(min(resRR_PP_st$resRR),
#                                        0.4, 0.6, 0.8, 1, 1.2, 1.4, 1.6,
#                                        max(resRR_PP_st$resRR)),
#                             include.lowest = TRUE)
#
# resRR_PP_st$PPcat <- cut(resRR_PP_st$PP, c(0, 0.2, 0.8, 1.00), include.lowest = TRUE)
#
# map_RR_ST <- left_join(map, resRR_PP_st, by = c("lad19cd" = "lad19cd.lad19cd"))
#
# library(viridis)  # a package providing colour palettes
#
# p3 <- ggplot() + geom_sf(data = map_RR_ST) + aes(fill = resRRcat) +
#   theme_bw() + scale_fill_brewer(palette = "PuOr") +
#   guides(fill = guide_legend(title = "RR")) + ggtitle("RR ST model") +
#   theme(text = element_text(size = 15),
#         axis.text.x = element_blank(),
#         axis.text.y = element_blank(), plot.title = element_text(size = 12, face = "bold"))
#
# p4 <- ggplot() + geom_sf(data = map_RR_ST) + aes(fill = PPcat) +
#   theme_bw() +
#   scale_fill_viridis(option = "plasma", name = "PP ST model", discrete = TRUE,
#                      direction = -1,
#                      guide = guide_legend(title.position = "top", reverse = TRUE)) +
#   ggtitle("PP ST model") +
#   theme(text = element_text(size = 15),
#         axis.text.x = element_blank(),
#         axis.text.y = element_blank(), plot.title = element_text(size = 12, face = "bold"))
#
# print(p3)
# print(p4)