####Loading Packages####
libraries <- c("tidyverse", "tidycensus", "sf", "tigris", "tmap",
"rmapshaper", "sp", "spdep", "spatstat", "readxl",
"GGally", "Hmisc", "ggplot2", "MASS", "VIM", "readr", "openxlsx", "olsrr",
"margins", "prediction", "webuse", "sjPlot", "coefplot", "corrplot",
"spatialreg", "spdep", "knitr", "stargazer", "broom", "glmmfields",
"arm", "dotwhisker", "jtools", "sandwich", "mgcv", "rgdal", "gtsummary", "gapminder", "car", "knitr", "kableExtra")
for (i in libraries){
tryCatch(library(i, character.only = TRUE),
print(paste(i, "installed")),
error = function(e) {install.packages(i);
library(i, character.only = TRUE)}
)
print(paste(i, "loaded"))
}
## [1] "tidyverse installed"
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr 1.1.2 ✔ readr 2.1.4
## ✔ forcats 1.0.0 ✔ stringr 1.5.0
## ✔ ggplot2 3.4.2 ✔ tibble 3.2.1
## ✔ lubridate 1.9.2 ✔ tidyr 1.3.0
## ✔ purrr 1.0.1
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
## [1] "tidyverse loaded"
## [1] "tidycensus installed"
## [1] "tidycensus loaded"
## [1] "sf installed"
## Linking to GEOS 3.11.0, GDAL 3.5.3, PROJ 9.1.0; sf_use_s2() is TRUE
## [1] "sf loaded"
## [1] "tigris installed"
## To enable caching of data, set `options(tigris_use_cache = TRUE)`
## in your R script or .Rprofile.
## [1] "tigris loaded"
## [1] "tmap installed"
## The legacy packages maptools, rgdal, and rgeos, underpinning the sp package,
## which was just loaded, will retire in October 2023.
## Please refer to R-spatial evolution reports for details, especially
## https://r-spatial.org/r/2023/05/15/evolution4.html.
## It may be desirable to make the sf package available;
## package maintainers should consider adding sf to Suggests:.
## The sp package is now running under evolution status 2
## (status 2 uses the sf package in place of rgdal)
## [1] "tmap loaded"
## [1] "rmapshaper installed"
## [1] "rmapshaper loaded"
## [1] "sp installed"
## [1] "sp loaded"
## [1] "spdep installed"
## Loading required package: spData
## To access larger datasets in this package, install the spDataLarge
## package with: `install.packages('spDataLarge',
## repos='https://nowosad.github.io/drat/', type='source')`
## [1] "spdep loaded"
## [1] "spatstat installed"
## Loading required package: spatstat.data
## Loading required package: spatstat.geom
## spatstat.geom 3.2-2
## Loading required package: spatstat.random
## spatstat.random 3.1-5
## Loading required package: spatstat.explore
## Loading required package: nlme
##
## Attaching package: 'nlme'
##
## The following object is masked from 'package:dplyr':
##
## collapse
##
## spatstat.explore 3.2-1
## Loading required package: spatstat.model
## Loading required package: rpart
## spatstat.model 3.2-4
## Loading required package: spatstat.linnet
## spatstat.linnet 3.1-1
##
## spatstat 3.0-6
## For an introduction to spatstat, type 'beginner'
## [1] "spatstat loaded"
## [1] "readxl installed"
## [1] "readxl loaded"
## [1] "GGally installed"
## Registered S3 method overwritten by 'GGally':
## method from
## +.gg ggplot2
## [1] "GGally loaded"
## [1] "Hmisc installed"
##
## Attaching package: 'Hmisc'
##
## The following objects are masked from 'package:dplyr':
##
## src, summarize
##
## The following objects are masked from 'package:base':
##
## format.pval, units
## [1] "Hmisc loaded"
## [1] "ggplot2 installed"
## [1] "ggplot2 loaded"
## [1] "MASS installed"
##
## Attaching package: 'MASS'
##
## The following object is masked from 'package:spatstat.geom':
##
## area
##
## The following object is masked from 'package:dplyr':
##
## select
## [1] "MASS loaded"
## [1] "VIM installed"
## Loading required package: colorspace
##
## Attaching package: 'colorspace'
##
## The following object is masked from 'package:spatstat.geom':
##
## coords
##
## Loading required package: grid
##
## Attaching package: 'grid'
##
## The following object is masked from 'package:spatstat.geom':
##
## as.mask
##
## VIM is ready to use.
##
## Suggestions and bug-reports can be submitted at: https://github.com/statistikat/VIM/issues
##
## Attaching package: 'VIM'
##
## The following object is masked from 'package:datasets':
##
## sleep
## [1] "VIM loaded"
## [1] "readr installed"
## [1] "readr loaded"
## [1] "openxlsx installed"
## [1] "openxlsx loaded"
## [1] "olsrr installed"
##
## Attaching package: 'olsrr'
##
## The following object is masked from 'package:MASS':
##
## cement
##
## The following object is masked from 'package:datasets':
##
## rivers
## [1] "olsrr loaded"
## [1] "margins installed"
## [1] "margins loaded"
## [1] "prediction installed"
## [1] "prediction loaded"
## [1] "webuse installed"
## [1] "webuse loaded"
## [1] "sjPlot installed"
## [1] "sjPlot loaded"
## [1] "coefplot installed"
## [1] "coefplot loaded"
## [1] "corrplot installed"
## corrplot 0.92 loaded
## [1] "corrplot loaded"
## [1] "spatialreg installed"
## Loading required package: Matrix
##
## Attaching package: 'Matrix'
##
## The following objects are masked from 'package:tidyr':
##
## expand, pack, unpack
##
##
## Attaching package: 'spatialreg'
##
## The following objects are masked from 'package:spdep':
##
## get.ClusterOption, get.coresOption, get.mcOption,
## get.VerboseOption, get.ZeroPolicyOption, set.ClusterOption,
## set.coresOption, set.mcOption, set.VerboseOption,
## set.ZeroPolicyOption
## [1] "spatialreg loaded"
## [1] "spdep installed"
## [1] "spdep loaded"
## [1] "knitr installed"
## [1] "knitr loaded"
## [1] "stargazer installed"
##
## Please cite as:
##
## Hlavac, Marek (2022). stargazer: Well-Formatted Regression and Summary Statistics Tables.
## R package version 5.2.3. https://CRAN.R-project.org/package=stargazer
## [1] "stargazer loaded"
## [1] "broom installed"
## [1] "broom loaded"
## [1] "glmmfields installed"
## Loading required package: Rcpp
##
## Attaching package: 'glmmfields'
##
## The following object is masked from 'package:broom':
##
## tidy
## [1] "glmmfields loaded"
## [1] "arm installed"
## Loading required package: lme4
##
## Attaching package: 'lme4'
##
## The following object is masked from 'package:nlme':
##
## lmList
##
##
## arm (Version 1.13-1, built: 2022-8-25)
##
## Working directory is /Users/sherigudez/Desktop/Research/Quant_Spat Analysis
##
##
## Attaching package: 'arm'
##
## The following object is masked from 'package:corrplot':
##
## corrplot
##
## The following objects are masked from 'package:coefplot':
##
## coefplot, coefplot.default, invlogit
##
## The following object is masked from 'package:spatstat.geom':
##
## rescale
## [1] "arm loaded"
## [1] "dotwhisker installed"
## [1] "dotwhisker loaded"
## [1] "jtools installed"
##
## Attaching package: 'jtools'
##
## The following object is masked from 'package:arm':
##
## standardize
##
## The following object is masked from 'package:glmmfields':
##
## tidy
##
## The following object is masked from 'package:Hmisc':
##
## %nin%
## [1] "jtools loaded"
## [1] "sandwich installed"
## [1] "sandwich loaded"
## [1] "mgcv installed"
## This is mgcv 1.8-42. For overview type 'help("mgcv-package")'.
## [1] "mgcv loaded"
## [1] "rgdal installed"
## Please note that rgdal will be retired during October 2023,
## plan transition to sf/stars/terra functions using GDAL and PROJ
## at your earliest convenience.
## See https://r-spatial.org/r/2023/05/15/evolution4.html and https://github.com/r-spatial/evolution
## rgdal: version: 1.6-7, (SVN revision 1203)
## Geospatial Data Abstraction Library extensions to R successfully loaded
## Loaded GDAL runtime: GDAL 3.5.3, released 2022/10/21
## Path to GDAL shared files: /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/rgdal/gdal
## GDAL does not use iconv for recoding strings.
## GDAL binary built with GEOS: TRUE
## Loaded PROJ runtime: Rel. 9.1.0, September 1st, 2022, [PJ_VERSION: 910]
## Path to PROJ shared files: /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/rgdal/proj
## PROJ CDN enabled: FALSE
## Linking to sp version:1.6-1
## To mute warnings of possible GDAL/OSR exportToProj4() degradation,
## use options("rgdal_show_exportToProj4_warnings"="none") before loading sp or rgdal.
## [1] "rgdal loaded"
## [1] "gtsummary installed"
##
## Attaching package: 'gtsummary'
##
## The following object is masked from 'package:MASS':
##
## select
## [1] "gtsummary loaded"
## [1] "gapminder installed"
## [1] "gapminder loaded"
## [1] "car installed"
## Loading required package: carData
##
## Attaching package: 'car'
##
## The following object is masked from 'package:arm':
##
## logit
##
## The following object is masked from 'package:spatstat.model':
##
## bc
##
## The following object is masked from 'package:spatstat.geom':
##
## ellipse
##
## The following object is masked from 'package:dplyr':
##
## recode
##
## The following object is masked from 'package:purrr':
##
## some
## [1] "car loaded"
## [1] "knitr installed"
## [1] "knitr loaded"
## [1] "kableExtra installed"
##
## Attaching package: 'kableExtra'
##
## The following object is masked from 'package:dplyr':
##
## group_rows
## [1] "kableExtra loaded"
options(scipen = 999)        # turn off scientific notation
rm(list = ls())              # clear the workspace before the analysis
options(tigris_class = "sf") # return tigris geometries as sf objects
###Importing and Merging Crime Databases###
# Reading in the police charge database; GEOID is read as character to preserve leading zeros
SDPD_2019PCV <- read.csv("SDPD_2019PCV.csv", header = TRUE, sep = ",", colClasses = c("GEOID" = "character"))
SDPD_2019PCV$GEOID <- str_pad(SDPD_2019PCV$GEOID, width = 9, pad = "0") # pad GEOID with leading zeros; sprintf("%09s", ...) pads strings with spaces, not zeros
# Pulling separate Census Data (ACS) database as df for analysis
SD.city.tracts <- st_read("SD.city.tracts.shp", stringsAsFactors = FALSE)
## Reading layer `SD.city.tracts' from data source
## `/Users/sherigudez/Desktop/Research/Quant_Spat Analysis/SD.city.tracts.shp'
## using driver `ESRI Shapefile'
## Simple feature collection with 291 features and 32 fields
## Geometry type: MULTIPOLYGON
## Dimension: XY
## Bounding box: xmin: -117.2822 ymin: 32.53471 xmax: -116.9057 ymax: 33.11418
## Geodetic CRS: GRS 1980(IUGG, 1980)
# Merging police data with non-spatial census data for data tables and analysis
SD_2019Police_Census <- merge(SDPD_2019PCV, SD.city.tracts, by = "GEOID")
###Descriptive Statistics###
# Filter specific variables for the descriptive table by inserting desired variables
selected_vars <- c("Total Population"= "log_tpop", "Median Income"="medincome", "Percent Homeowner"="powner", "Percent Single Mother"= "psingm", "Percent with Bachelor's"= "pbach", "Percent Asian"="pnhasn", "Percent Hispanic"= "phisp", "Percent Black"= "pnhblk", "Percent Other"= "poth", "Percent Foreign Born"= "pfborn")
# Create a subset of the data with only the selected variables
subset_data <- SD_2019Police_Census[, selected_vars]
# Modify column names with custom variable names
colnames(subset_data) <- names(selected_vars)
# Generate descriptive table using stargazer
stargazer(subset_data,
type = "text",
title = "Descriptive Statistics",
colnames = TRUE)
##
## Descriptive Statistics
## ================================================================
## Statistic N Mean St. Dev. Min Max
## ----------------------------------------------------------------
## Total Population 291 3.674 0.176 3.210 4.370
## Median Income 291 82,493.750 35,778.260 25,469 196,299
## Percent Homeowner 291 17.365 8.806 0.000 39.175
## Percent Single Mother 291 3.511 2.911 0.000 14.703
## Percent with Bachelor's 291 26.256 14.466 1.738 66.704
## Percent Asian 291 14.923 14.003 0.000 70.722
## Percent Hispanic 291 29.894 24.549 4.193 97.325
## Percent Black 291 5.951 6.723 0.000 37.489
## Percent Other 291 4.306 2.531 0.000 13.054
## Percent Foreign Born 291 24.885 11.934 4.394 55.991
## ----------------------------------------------------------------
#, out = "Descp_Stat.html")
###CORRELATION### Creating a Bivariate Correlation Matrix of all Independent Variables
# Define the variables of interest
variables <- c("Total Pop."= "log_tpop", "Med. Income"="medincome", "% Homeowner"="powner", "% Fem.Led House"= "psingm", "% Bach.Deg"= "pbach", "% Asian"="pnhasn", "% Hispanic"= "phisp", "% Black"= "pnhblk", "% Other"= "poth", "% Foreign Born"= "pfborn")
# Compute the correlation matrix
correlation.matrix <- cor(SD_2019Police_Census[, variables])
# Modify column names with custom variable names
colnames(correlation.matrix) <- names(variables)
# Modify row names with custom variable names
rownames(correlation.matrix) <- names(variables)
# Generate the correlation table using stargazer
stargazer(correlation.matrix,
type = "text",
style = "asr",
title = "Correlation Matrix of Independent Variables",
colnames = TRUE)
##
## Correlation Matrix of Independent Variables
## -------------------------------------------------------------------------------------------------------------------------------
## Total Pop. Med. Income % Homeowner % Fem.Led House % Bach.Deg % Asian % Hispanic % Black % Other % Foreign Born
## -------------------------------------------------------------------------------------------------------------------------------
## Total Pop. 1 0.070 -0.037 0.073 -0.058 0.253 0.040 0.046 0.123 0.216
## Med. Income 0.070 1 0.729 -0.487 0.592 0.240 -0.638 -0.396 0.273 -0.295
## % Homeowner -0.037 0.729 1 -0.416 0.411 0.031 -0.531 -0.320 0.153 -0.382
## % Fem.Led House 0.073 -0.487 -0.416 1 -0.567 -0.135 0.591 0.339 -0.228 0.383
## % Bach.Deg -0.058 0.592 0.411 -0.567 1 0.107 -0.782 -0.472 0.286 -0.454
## % Asian 0.253 0.240 0.031 -0.135 0.107 1 -0.304 0.008 0.330 0.538
## % Hispanic 0.040 -0.638 -0.531 0.591 -0.782 -0.304 1 0.268 -0.455 0.530
## % Black 0.046 -0.396 -0.320 0.339 -0.472 0.008 0.268 1 -0.061 0.160
## % Other 0.123 0.273 0.153 -0.228 0.286 0.330 -0.455 -0.061 1 -0.158
## % Foreign Born 0.216 -0.295 -0.382 0.383 -0.454 0.538 0.530 0.160 -0.158 1
## -------------------------------------------------------------------------------------------------------------------------------
Scatter Plots of Police Charges Against Important Variables, with Lines of Best Fit
# Define the variables and their corresponding x-labels
variablesSP <- c("medincome", "psingm", "pbach", "phisp", "pfborn")
xlabelsSP <- c("Median Income", "Percent Female Headed House", "Percent with Bachelor's",
"Percent Hispanic", "Percent Foreign Born")
ylabelsSP <- "Total Police Reports (2019)"
# Create a list to store the ggplot objects
plotSP <- vector("list", length(variablesSP))
# Iterate over the variables and create the plots
for (i in seq_along(variablesSP)) {
  # aes_string() is deprecated; the .data pronoun is the tidy-eval replacement
  plotSP[[i]] <- ggplot(SD_2019Police_Census,
                        aes(.data[[variablesSP[i]]], pcharge_19)) +
    stat_summary(fun.data = mean_cl_normal) +
    geom_smooth(method = 'lm', formula = y ~ x) +
    xlab(xlabelsSP[i]) +
    ylab(ylabelsSP)
}
# Print the plots
for (p in plotSP) {
  print(p)
}
## Warning: Removed 287 rows containing missing values (`geom_segment()`).
## Warning: Removed 282 rows containing missing values (`geom_segment()`).
## Warning: Removed 291 rows containing missing values (`geom_segment()`).
## Warning: Removed 291 rows containing missing values (`geom_segment()`).
## Warning: Removed 291 rows containing missing values (`geom_segment()`).
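# The removed-rows warnings come from stat_summary(): most x values are unique, so mean_cl_normal() has n = 1 per group and cannot form a confidence interval; those point ranges are dropped, while the fitted lines are unaffected.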
###NEGATIVE BINOMIAL REGRESSION MODELS### Collinearity Test of an NB Model (Not Lag Controlled)
# Fit the experimental negative binomial regression model
NegBM_19 <- glm.nb(pcharge_19 ~ log_tpop + medincome + powner + psingm + p15_29 + pnhasn + phisp + pnhblk + poth + pfborn, data = SD_2019Police_Census)
# Perform collinearity diagnostics
coll_diag_negbinom <- vif(NegBM_19)
coll_diag_negbinom
## log_tpop medincome powner psingm p15_29 pnhasn phisp pnhblk
## 1.120660 2.994610 3.468358 1.784426 1.987118 5.078461 5.413672 1.283005
## poth pfborn
## 1.398034 6.119009
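# Common rule of thumb: VIF above 5 (here % Asian, % Hispanic, and % Foreign Born) indicates moderate collinearity, and above 10 serious collinearity; no predictor exceeds 10, so all are retained.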
NB Regression Models (No Spatial Lag Control)
# Run a negative binomial model with desired variables
NegBM_19 <- glm.nb(pcharge_19 ~ log_tpop + medincome + powner + psingm + p15_29 + pnhasn + phisp + pnhblk + poth + pfborn, data = SD_2019Police_Census, control = glm.control(maxit = 80000))
# Compute estimates and confidence intervals
est <- data.frame(Estimate = coef(NegBM_19), confint(NegBM_19))
## Waiting for profiling to be done...
stargazer(NegBM_19,
type= "text",
ci= TRUE,
ci.level= 0.95,
title="Negative Binomial Regression Results",
dep.var.labels= "Total Police Reports (2019)",
covariate.labels= c("Population (Log)", "Median Income","% Homeowner","% Fem.Led House","% Aged 15 to 29","% Asian","% Hispanic","% Black","% Other","% Foreign Born"),
style = "asr",
align=TRUE)
##
## Negative Binomial Regression Results
## --------------------------------------------
## Total Police Reports (2019)
## --------------------------------------------
## Population (Log) -0.460
## Median Income -0.00001**
## % Homeowner -0.013
## % Fem.Led House 0.013
## % Aged 15 to 29 0.009
## % Asian -0.025
## % Hispanic -0.029**
## % Black 0.025
## % Other -0.007
## % Foreign Born 0.032
## Constant 7.845**
## N 291
## Log Likelihood -1,481.441
## theta 0.266 (0.023)
## AIC 2,984.881
## --------------------------------------------
## *p < .05; **p < .01; ***p < .001
#, out= "regression.html")
est
## Estimate X2.5.. X97.5..
## (Intercept) 7.84453238174 1.52644959238 14.355987445164
## log_tpop -0.45981366045 -2.24164003007 1.265005102643
## medincome -0.00001471733 -0.00002690511 -0.000001702082
## powner -0.01311246550 -0.06731130988 0.039483382097
## psingm 0.01330981185 -0.09736969405 0.129518697424
## p15_29 0.00892472592 -0.02367625035 0.044339738530
## pnhasn -0.02546397115 -0.06380543475 0.012476558615
## phisp -0.02921767001 -0.05559719165 -0.002417563029
## pnhblk 0.02547288755 -0.01272247683 0.070775490960
## poth -0.00745132559 -0.11336402822 0.103660762657
## pfborn 0.03185240544 -0.01991487079 0.085274430777
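Because the negative binomial model is fit on the log scale, exponentiating the estimates gives incidence rate ratios, which are often easier to read; a minimal sketch using the est object above:
# IRRs: a one-unit increase in a predictor multiplies the expected charge count by exp(beta)
exp(est)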
#NB Regression Models (with Spatial Lag Control)
##############2019##############
#NegBM_19SL <- glm.nb(pcharge_19~ lag19 + log_tpop + medincome + powner + psingm + pbach + pnhasn + phisp + pnhblk + poth + pfborn, data = SD_2019Police_Census, control=glm.control(maxit=80000))
# Compute estimates and confidence intervals
#estSL <- data.frame(Estimate = coef(NegBM_19SL), confint(NegBM_19SL))
#Create table
#stargazer(NegBM_19SL,
# type= "text",
# ci= TRUE,
# ci.level= 0.95,
# title="Negative Binomial Regression Results",
# dep.var.labels= "Total Police Reports (2019)",
# covariate.labels= c("Population (Log)", "Median Income","% Homeowner","% Fem.Led House","% Aged 15 to 29","% Asian","% Hispanic","% Black","% Other","% Foreign Born"),
# style = "asr",
# align=TRUE)
#, out= "regression.html")
#estSL
###GRAPHING NB MODELS### Coefficient Plots (No Spatial Lag Control)
# Creating labels for desired variables
coef_names <- c("Median Income" = "medincome", "% Homeowner" = "powner",
                "% Single Mom" = "psingm", "% Bachelor's" = "pbach",
                "% Hispanic" = "phisp", "% Asian" = "pnhasn",
                "% Black" = "pnhblk", "% Other" = "poth", "% Foreign Born" = "pfborn")
coef_focused <- c("Median Income" = "medincome", "% Hispanic" = "phisp",
                  "% Foreign Born" = "pfborn", "% Asian" = "pnhasn", "% Black" = "pnhblk")
#############2019 + FOCUSED
plot_summs(NegBM_19, robust = TRUE, inner_ci_level = .9,
           coefs = coef_names)
## Loading required namespace: broom.mixed
plot_summs(NegBM_19, robust = TRUE, inner_ci_level = .9,
           coefs = coef_focused)
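# robust = TRUE requests heteroskedasticity-robust (sandwich) standard errors for the plotted intervals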
###########ALL MODELS
#plot_summs(NegBM_19, NegBM_20, NegBM_21, robust = TRUE, inner_ci_level = .9, point.shape= T,
#model.names = c("2019", "2020", "2021"),
#coefs = (coef_names))
#################FOCUSED MODEL
#plot_summs(NegBM_19, NegBM_20, NegBM_21, robust = TRUE, inner_ci_level = .9, point.shape= T,
#model.names = c("2019", "2020", "2021"),
#coefs = (coef_focused))
###PREDICTIVE MARGINS OF NB MODELS### Predictive Margin Graphs (Not Lag Controlled)
# Set the theme
theme_set(theme_sjplot())
# Define the variables and corresponding titles
variablesPM <- c("medincome", "phisp", "pnhasn", "pnhblk", "pfborn")
titlesPM <- c("2019 Total Police Charges by Median Income",
"2019 Total Police Charges by Percent Hispanic",
"2019 Total Police Charges by Percent Asian",
"2019 Total Police Charges by Percent Black",
"2019 Total Police Charges by Percent Foreign Born")
xlabelsPM <- c("Median Income", "Percent Hispanic", "Percent Asian",
"Percent Black", "Percent Foreign Born")
ylabelsPM <- "2019 Police Reports"
# Create an empty list to store the plots
plotsPM <- vector("list", length(variablesPM))
# Plotting PM graphs for desired variables
for (i in seq_along(variablesPM)) {
  plotPM <- plot_model(NegBM_19, type = "pred", terms = variablesPM[i], title = titlesPM[i])
  # Axis labels are set with ggplot2, which plot_model objects accept directly
  plotPM <- plotPM + xlab(xlabelsPM[i]) + ylab(ylabelsPM)
  plotsPM[[i]] <- plotPM
}
# Display the plots
for (plotPM in plotsPM) {
print(plotPM)
}
###SETTING UP SPATIAL DATA### Reading in Databases
# Pulling spatial census data (ACS) for maps
SD.city.tractsMAPS <- st_read("SD.city.tracts.shp")
## Reading layer `SD.city.tracts' from data source
## `/Users/sherigudez/Desktop/Research/Quant_Spat Analysis/SD.city.tracts.shp'
## using driver `ESRI Shapefile'
## Simple feature collection with 291 features and 32 fields
## Geometry type: MULTIPOLYGON
## Dimension: XY
## Bounding box: xmin: -117.2822 ymin: 32.53471 xmax: -116.9057 ymax: 33.11418
## Geodetic CRS: GRS 1980(IUGG, 1980)
# Merging spatial data for maps
SD_2019Police_CensusMAPS <- merge(SD.city.tractsMAPS, SDPD_2019PCV, by = "GEOID")
Neighbor Connectivity, Neighbor Weights & Weight Matrices, and Visualizing Neighbor Weights
# Determining neighbor connectivity using the Queen adjacency method
pd19 <- poly2nb(SD_2019Police_CensusMAPS, queen = T)
summary(pd19) # check that the output looks right
## Neighbour list object:
## Number of regions: 291
## Number of nonzero links: 1702
## Percentage nonzero weights: 2.009896
## Average number of links: 5.848797
## 2 regions with no links:
## 250 253
## Link number distribution:
##
## 0 2 3 4 5 6 7 8 9 10 11 12 13 15
## 2 8 21 42 50 67 55 25 11 4 1 1 2 2
## 8 least connected regions:
## 228 251 252 255 282 283 285 289 with 2 links
## 2 most connected regions:
## 100 208 with 15 links
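# Note from the summary: tracts 250 and 253 have no neighbors, which is why zero.policy = TRUE is needed in the weight and test functions below.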
# Creating a distance-based neighbor object with a 20-mile cutoff. For geographic
# (lon/lat) coordinates, dnearneigh() measures d1/d2 in kilometres, so 20 miles ~ 32.19 km
centroids <- st_centroid(st_geometry(SD.city.tracts))
sdnb_dist1 <- dnearneigh(centroids, d1 = 0, d2 = 32.19,
                         row.names = SD_2019Police_CensusMAPS$GEOID)
# Red lines link census tracts that are treated as neighbors and thus assumed to influence one another
plot(st_geometry(SD.city.tracts), border = "grey60", reset = FALSE)
plot(pd19, coords = centroids, add = T, col = "red")
###AUTO-CORRELATION ANALYSIS (Based on OLS)### Spatial Autocorrelation using Moran’s I
pd19w <- nb2listw(pd19, style = "W", zero.policy = T)
# zero.policy = T tolerates the two tracts with no neighbors (they receive zero weights)
moran.plot(SD_2019Police_CensusMAPS$pcharge_19, listw = pd19w, zero.policy = T,
           xlab = "Standardized Total Police Charge Prevalence",
           ylab = "Neighbors' Standardized Total PC Prevalence",
           main = c("Moran Scatterplot for Total Police Charges (2019)", "in San Diego"))
Local Moran’s I for Local Spatial Autocorrelation
locali19 <- localmoran(SD_2019Police_CensusMAPS$pcharge_19, pd19w, zero.policy = T)
# Saving the local statistic (column 1) and its z-score (column 4) in the database
SD_2019Police_CensusMAPS <- SD_2019Police_CensusMAPS %>%
  mutate(localmi19 = locali19[, 1], localz19 = locali19[, 4])
# Break points must span the local z-scores, not the raw charge counts
breaks19i <- c(min(SD_2019Police_CensusMAPS$localz19), -1.96, 1.96, max(SD_2019Police_CensusMAPS$localz19))
Mapping Local Moran’s I
# Designating areas with Z-Scores greater than 1.96 and lower than -1.96 as high cluster areas and low cluster areas, respectively
SD_2019Police_CensusMAPS <- SD_2019Police_CensusMAPS %>%
mutate(mcluster19 = cut(localz19, breaks = breaks19i, labels = c("Negative Correlation", "Not Significant", "Positive Correlation"), include.lowest = TRUE))
# Filter out missing or NA values in the mcluster19 variable
SD_2019Police_CensusMAPS_filtered <- SD_2019Police_CensusMAPS[!is.na(SD_2019Police_CensusMAPS$mcluster19), ]
# Create the map
local.ac.sd19 <- tm_shape(SD_2019Police_CensusMAPS_filtered, unit = "mi") +
  tm_polygons(col = "mcluster19", title = "", palette = "-RdBu") + # mcluster19 is categorical, so no breaks argument is needed
  tm_layout(frame = FALSE, main.title = "Police Charge Clusters (2019)",
            legend.outside = TRUE) +
  tm_basemap("OpenStreetMap")
local.ac.sd19
Getis-Ord for Local Spatial Autocorrelation
localg19 <- localG(SD_2019Police_CensusMAPS$pcharge_19, pd19w, zero.policy = T)
SD_2019Police_CensusMAPS <- SD_2019Police_CensusMAPS %>%
  mutate(localg19 = as.numeric(localg19))
# For the Gi* statistic, each tract is included in its own neighbor set
sd.self19 <- include.self(pd19)
sd.w.self19 <- nb2listw(sd.self19, style = "W", zero.policy = T)
localgstar19 <- localG(SD_2019Police_CensusMAPS$pcharge_19, sd.w.self19)
SD_2019Police_CensusMAPS <- SD_2019Police_CensusMAPS %>%
  mutate(localgstar19 = as.numeric(localgstar19))
# Break points must span the Gi* z-scores, not the raw charge counts
breaks19 <- c(min(SD_2019Police_CensusMAPS$localgstar19), -2.58, -1.96, -1.65, 1.65, 1.96, 2.58, max(SD_2019Police_CensusMAPS$localgstar19))
SD_2019Police_CensusMAPS <- SD_2019Police_CensusMAPS %>%
  mutate(gcluster19 = cut(localgstar19, breaks = breaks19, include.lowest = TRUE,
                          labels = c("Cold spot: 99% confidence", "Cold spot: 95% confidence",
                                     "Cold spot: 90% confidence", "Not significant",
                                     "Hot spot: 90% confidence", "Hot spot: 95% confidence",
                                     "Hot spot: 99% confidence")))
# A large positive Gi* marks a cluster of high total police report counts; a large negative value marks a cluster of low counts
tm_shape(SD_2019Police_CensusMAPS, unit = "mi") +
tm_polygons(col = "gcluster19", title = "", palette = "-RdBu",
breaks = breaks19) +
tm_layout(frame = F, main.title = "Total PC Clusters (2019)",
legend.outside = T)
Monte Carlo Simulation to Get a P-Value
moran.mc(SD_2019Police_Census$pcharge_19, pd19w, zero.policy= T, nsim=999)
##
## Monte-Carlo simulation of Moran I
##
## data: SD_2019Police_Census$pcharge_19
## weights: pd19w
## number of simulations + 1: 1000
##
## statistic = 0.18434, observed rank = 1000, p-value = 0.001
## alternative hypothesis: greater
# The observed Moran's I (0.18 for 2019; 0.16 and 0.14 in the 2020 and 2021 models) exceeds all 999 simulated values, so the pseudo p-value is 1/(999 + 1) = 0.001; the test is significant in every year, confirming positive spatial autocorrelation
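The pseudo p-value arithmetic is simple enough to verify directly; a minimal sketch mirroring the moran.mc() logic:
# One-sided Monte Carlo test: (simulations >= observed, plus the observed itself) / (nsim + 1)
nsim <- 999
n_greater <- 0               # no simulated I reached the observed I of 0.18
(n_greater + 1) / (nsim + 1) # = 0.001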
Global Moran’s I Test for Spatial Autocorrelation
moran.test(SD_2019Police_Census$pcharge_19, pd19w, zero.policy = T)
##
## Moran I test under randomisation
##
## data: SD_2019Police_Census$pcharge_19
## weights: pd19w n reduced by no-neighbour observations
##
##
## Moran I statistic standard deviate = 5.7731, p-value = 0.000000003892
## alternative hypothesis: greater
## sample estimates:
## Moran I statistic Expectation Variance
## 0.184336006 -0.003472222 0.001058311
# Moran's I itself is modest (0.18), but the p-value is effectively zero across all tests, indicating statistically significant spatial autocorrelation of police charge reports in San Diego
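As a cross-check, Moran's I can be computed by hand from the weight matrix; a sketch, assuming the objects defined above (the two no-neighbor tracts make the result differ trivially from moran.test()):
# I = (n / sum(W)) * (z' W z) / (z' z), where z is the mean-centered outcome
x <- SD_2019Police_Census$pcharge_19
W <- listw2mat(pd19w)        # dense row-standardized weight matrix
z <- x - mean(x)
(length(x) / sum(W)) * as.numeric(t(z) %*% W %*% z) / sum(z^2) # ~0.18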
###SPATIAL LAG AND SPATIAL ERROR MODELS### Spatial Lag Model (2019)
# Fit spatial lag regression model
fit.lag19 <- lagsarlm(pcharge_19 ~ log_tpop + medincome + powner + psingm + p15_29 + pnhasn + phisp + pnhblk + poth + pfborn, data = SD_2019Police_Census, listw = pd19w, zero.policy = TRUE)
# Print model summary
summary(fit.lag19)
##
## Call:lagsarlm(formula = pcharge_19 ~ log_tpop + medincome + powner +
## psingm + p15_29 + pnhasn + phisp + pnhblk + poth + pfborn,
## data = SD_2019Police_Census, listw = pd19w, zero.policy = TRUE)
##
## Residuals:
## Min 1Q Median 3Q Max
## -227.278 -73.764 -18.729 31.726 1708.139
##
## Type: lag
## Regions with no neighbours included:
## 250 253
## Coefficients: (asymptotic standard errors)
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 279.45994074 216.24516250 1.2923 0.19624
## log_tpop -20.67104178 57.60012789 -0.3589 0.71969
## medincome -0.00071746 0.00046715 -1.5358 0.12459
## powner -2.32796775 2.02915142 -1.1473 0.25127
## psingm 1.02145103 4.40047329 0.2321 0.81644
## p15_29 0.67858145 1.30004008 0.5220 0.60169
## pnhasn -1.92447201 1.54784720 -1.2433 0.21375
## phisp -1.78516685 0.91197977 -1.9575 0.05029
## pnhblk 1.78945886 1.61959510 1.1049 0.26921
## poth -2.41421403 4.47939869 -0.5390 0.58991
## pfborn 2.07435222 1.99025819 1.0423 0.29729
##
## Rho: 0.20514, LR test value: 6.2142, p-value: 0.012673
## Asymptotic standard error: 0.085723
## z-value: 2.3931, p-value: 0.016708
## Wald statistic: 5.7268, p-value: 0.016708
##
## Log likelihood: -1896.666 for lag model
## ML residual variance (sigma squared): 26635, (sigma: 163.2)
## Number of observations: 291
## Number of parameters estimated: 13
## AIC: 3819.3, (AIC for lm: 3823.5)
## LM test for residual autocorrelation
## test value: 15.002, p-value: 0.00010742
# Compute impacts of predictors
impacts_summary <- summary(impacts(fit.lag19, listw = pd19w, zero.policy = TRUE, R = 500), zstat = TRUE)
# Print impacts summary
print(impacts_summary)
## Impact measures (lag, exact):
## Direct Indirect Total
## log_tpop -20.834204918 -5.1717255707 -26.0059304889
## medincome -0.000723122 -0.0001795023 -0.0009026244
## powner -2.346343143 -0.5824384889 -2.9287816320
## psingm 1.029513670 0.2555586927 1.2850723628
## p15_29 0.683937709 0.1697755278 0.8537132364
## pnhasn -1.939662478 -0.4814871543 -2.4211496326
## phisp -1.799257745 -0.4466341443 -2.2458918891
## pnhblk 1.803583627 0.4477079687 2.2512915956
## poth -2.433270193 -0.6040166028 -3.0372867958
## pfborn 2.090725746 0.5189859582 2.6097117043
## ========================================================
## Simulation results ( variance matrix):
## Direct:
##
## Iterations = 1:500
## Thinning interval = 1
## Number of chains = 1
## Sample size per chain = 500
##
## 1. Empirical mean and standard deviation for each variable,
## plus standard error of the mean:
##
## Mean SD Naive SE Time-series SE
## log_tpop -24.1896029 58.2699527 2.60591151 2.60591151
## medincome -0.0007153 0.0005003 0.00002237 0.00002237
## powner -2.1902759 2.0074705 0.08977681 0.08977681
## psingm 1.2067410 4.5514229 0.20354582 0.20354582
## p15_29 0.7842126 1.2244118 0.05475736 0.05475736
## pnhasn -2.0080742 1.5211784 0.06802917 0.06802917
## phisp -1.8125347 0.9204130 0.04116212 0.04116212
## pnhblk 1.8413994 1.7202150 0.07693035 0.07693035
## poth -2.4613476 4.3100120 0.19274959 0.19274959
## pfborn 2.1940933 1.9140957 0.08560096 0.08560096
##
## 2. Quantiles for each variable:
##
## 2.5% 25% 50% 75% 97.5%
## log_tpop -137.952263 -61.280740 -25.6523415 14.4862113 93.9619324
## medincome -0.001634 -0.001077 -0.0007084 -0.0004011 0.0002498
## powner -5.967133 -3.481399 -2.1847480 -0.8978393 1.5803810
## psingm -8.116602 -1.788353 1.2162742 4.4773810 9.4545354
## p15_29 -1.554178 -0.081424 0.7336207 1.6580685 3.2304259
## pnhasn -4.691533 -3.114617 -1.9811271 -1.0072521 0.9288740
## phisp -3.608457 -2.457447 -1.8250038 -1.1587527 -0.1252489
## pnhblk -1.526702 0.735137 1.8970514 3.0468839 5.1239092
## poth -10.757310 -5.402314 -2.2010099 0.4680687 6.1472779
## pfborn -1.308948 0.856844 2.2944988 3.4572033 5.4628426
##
## ========================================================
## Indirect:
##
## Iterations = 1:500
## Thinning interval = 1
## Number of chains = 1
## Sample size per chain = 500
##
## 1. Empirical mean and standard deviation for each variable,
## plus standard error of the mean:
##
## Mean SD Naive SE Time-series SE
## log_tpop -6.3238548 17.2052795 0.769443490 0.769443490
## medincome -0.0001801 0.0001656 0.000007406 0.000006843
## powner -0.5772006 0.6384374 0.028551790 0.028551790
## psingm 0.3104040 1.3968037 0.062466962 0.062466962
## p15_29 0.1869374 0.3562902 0.015933784 0.015933784
## pnhasn -0.5069395 0.5048340 0.022576861 0.021699426
## phisp -0.4614619 0.3527621 0.015776000 0.014101084
## pnhblk 0.4636549 0.5313868 0.023764341 0.023764341
## poth -0.6558617 1.3019649 0.058225642 0.054442818
## pfborn 0.5476734 0.6037687 0.027001358 0.024414903
##
## 2. Quantiles for each variable:
##
## 2.5% 25% 50% 75% 97.5%
## log_tpop -46.2148136 -14.65305 -4.7610064 2.70254015 24.73559290
## medincome -0.0005439 -0.00026 -0.0001521 -0.00007549 0.00007097
## powner -2.0086919 -0.90939 -0.5222355 -0.14448229 0.42567245
## psingm -2.1021954 -0.42104 0.1715053 1.08474967 3.02571763
## p15_29 -0.4417915 -0.01269 0.1468478 0.37817823 0.93608547
## pnhasn -1.7511890 -0.71876 -0.4232746 -0.18893544 0.24604986
## phisp -1.2371999 -0.63663 -0.3951392 -0.22990503 0.00787405
## pnhblk -0.3934734 0.11226 0.3898608 0.73036521 1.75474333
## poth -3.8018306 -1.33594 -0.4665026 0.07299046 1.62004927
## pfborn -0.3856601 0.16009 0.4707114 0.86870874 2.04760713
##
## ========================================================
## Total:
##
## Iterations = 1:500
## Thinning interval = 1
## Number of chains = 1
## Sample size per chain = 500
##
## 1. Empirical mean and standard deviation for each variable,
## plus standard error of the mean:
##
## Mean SD Naive SE Time-series SE
## log_tpop -30.5134578 73.6721553 3.29471895 3.29471895
## medincome -0.0008955 0.0006296 0.00002816 0.00002816
## powner -2.7674766 2.5409102 0.11363296 0.11363296
## psingm 1.5171450 5.7902017 0.25894569 0.25894569
## p15_29 0.9711500 1.5416345 0.06894399 0.06894399
## pnhasn -2.5150137 1.9267006 0.08616467 0.08616467
## phisp -2.2739966 1.1774593 0.05265758 0.04659110
## pnhblk 2.3050542 2.1555845 0.09640067 0.09640067
## poth -3.1172094 5.4794307 0.24504759 0.24504759
## pfborn 2.7417668 2.4077644 0.10767850 0.10767850
##
## 2. Quantiles for each variable:
##
## 2.5% 25% 50% 75% 97.5%
## log_tpop -172.519370 -76.886981 -31.4479977 16.9757671 118.6589798
## medincome -0.002059 -0.001318 -0.0008741 -0.0004842 0.0003139
## powner -7.480654 -4.385401 -2.8680420 -1.1081296 1.9903612
## psingm -10.143378 -2.339629 1.5153199 5.6343817 12.3150137
## p15_29 -2.044448 -0.097802 0.9400566 2.0340944 4.1323021
## pnhasn -6.100813 -3.800372 -2.4493926 -1.2168360 1.2426532
## phisp -4.811421 -2.972984 -2.2948344 -1.4535170 -0.1563540
## pnhblk -1.903717 0.920458 2.4449852 3.7844713 6.5216827
## poth -13.892636 -6.775992 -2.8492100 0.5819473 7.4878699
## pfborn -1.879956 1.084284 2.8731970 4.2762592 7.3351401
# Print interpretation
cat("Percent Hispanic is marginally significant (p = 0.05), and Median Income falls short of conventional significance (p = 0.12). The Rho of 0.21 is modest but statistically significant (p = 0.013 by the LR test, p = 0.017 by the Wald test), indicating spatial dependence in the outcome: a change in a predictor in one census tract ripples into neighboring tracts' total police charge counts. Because the spatial lag feeds back into the fitted values, the coefficients cannot be read as marginal effects. The impacts analysis therefore decomposes each predictor's influence into direct, indirect (through neighbors), and total effects on total police charges.")
## Percent Hispanic is marginally significant (p = 0.05), and Median Income falls short of conventional significance (p = 0.12). The Rho of 0.21 is modest but statistically significant (p = 0.013 by the LR test, p = 0.017 by the Wald test), indicating spatial dependence in the outcome: a change in a predictor in one census tract ripples into neighboring tracts' total police charge counts. Because the spatial lag feeds back into the fitted values, the coefficients cannot be read as marginal effects. The impacts analysis therefore decomposes each predictor's influence into direct, indirect (through neighbors), and total effects on total police charges.
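The impact decomposition follows from the model's reduced form, y = (I - rho W)^-1 (X beta + e); a sketch verifying the average total effect of median income against the table above:
rho <- fit.lag19$rho
W <- listw2mat(pd19w)                 # row-standardized weights as a dense matrix
M <- solve(diag(nrow(W)) - rho * W)   # spatial multiplier (I - rho W)^-1
beta_inc <- coef(fit.lag19)["medincome"]
sum(M) * beta_inc / nrow(W)           # average total effect, approximately -0.0009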
Spatial Error Model
# Fit spatial error regression model
fit.err19 <- errorsarlm(pcharge_19 ~ log_tpop + medincome + powner + psingm + p15_29 + pnhasn + phisp + pnhblk + poth + pfborn, data = SD_2019Police_Census, listw = pd19w, zero.policy = TRUE)
# Print model summary
summary(fit.err19)
##
## Call:errorsarlm(formula = pcharge_19 ~ log_tpop + medincome + powner +
## psingm + p15_29 + pnhasn + phisp + pnhblk + poth + pfborn,
## data = SD_2019Police_Census, listw = pd19w, zero.policy = TRUE)
##
## Residuals:
## Min 1Q Median 3Q Max
## -212.639 -79.387 -18.076 34.037 1705.984
##
## Type: error
## Regions with no neighbours included:
## 250 253
## Coefficients: (asymptotic standard errors)
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 312.39074843 218.55031199 1.4294 0.15290
## log_tpop -18.73958548 57.91042534 -0.3236 0.74624
## medincome -0.00081614 0.00049078 -1.6630 0.09632
## powner -2.50142819 2.13552176 -1.1713 0.24146
## psingm 0.12741114 4.47115165 0.0285 0.97727
## p15_29 0.52396600 1.36867174 0.3828 0.70185
## pnhasn -2.05643989 1.64802119 -1.2478 0.21210
## phisp -1.78328725 0.98677043 -1.8072 0.07073
## pnhblk 1.91365273 1.72843021 1.1072 0.26822
## poth -2.72136563 4.47502574 -0.6081 0.54311
## pfborn 2.26949088 2.08613646 1.0879 0.27664
##
## Lambda: 0.16291, LR test value: 2.7943, p-value: 0.0946
## Asymptotic standard error: 0.090664
## z-value: 1.7968, p-value: 0.072363
## Wald statistic: 3.2286, p-value: 0.072363
##
## Log likelihood: -1898.376 for error model
## ML residual variance (sigma squared): 27028, (sigma: 164.4)
## Number of observations: 291
## Number of parameters estimated: 13
## AIC: NA (not available for weighted model), (AIC for lm: 3823.5)
# Print interpretation
cat("Percent Hispanic (p = 0.07) and Median Income (p = 0.10) are now only marginally significant. Lambda (0.16) is likewise modest and only marginally significant (p = 0.09 by the LR test, p = 0.07 by the Wald test), pointing to spatial dependence in the residuals. Unlike the lag model, the spatial error model's estimates can be interpreted as marginal effects, because the spatial process enters only through the error term: unmodeled spatial dependence does not bias the coefficients, but it does bias the standard errors. Put another way, the residual spatial structure signals an autocorrelation problem that still needs to be controlled.")
## Percent Hispanic (p = 0.07) and Median Income (p = 0.10) are now only marginally significant. Lambda (0.16) is likewise modest and only marginally significant (p = 0.09 by the LR test, p = 0.07 by the Wald test), pointing to spatial dependence in the residuals. Unlike the lag model, the spatial error model's estimates can be interpreted as marginal effects, because the spatial process enters only through the error term: unmodeled spatial dependence does not bias the coefficients, but it does bias the standard errors. Put another way, the residual spatial structure signals an autocorrelation problem that still needs to be controlled.
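When the lag and error diagnostics are this close, the (robust) Lagrange multiplier tests on a plain OLS fit are the standard tiebreaker; a sketch using spdep's lm.LMtests() with the same formula:
fit.ols19 <- lm(pcharge_19 ~ log_tpop + medincome + powner + psingm + p15_29 +
                  pnhasn + phisp + pnhblk + poth + pfborn, data = SD_2019Police_Census)
lm.LMtests(fit.ols19, listw = pd19w, zero.policy = TRUE,
           test = c("LMlag", "LMerr", "RLMlag", "RLMerr"))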
####FITTING A POISSON MODEL WITH NON-LINEAR SPLINES OF THE COORDINATES TO CONTROL FOR SPATIAL LAG EFFECTS#### Poisson GAM with a Spatial Smooth
#gets the polygon centroids
centroids <- st_centroid(st_geometry(SD.city.tracts))
# Extract the latitude and longitude coordinates
coordinates <- st_coordinates(centroids)
longitude <- coordinates[, "X"]
latitude <- coordinates[, "Y"]
# Create a new data frame with latitude and longitude
xy <- data.frame(Latitude = latitude, Longitude = longitude)
# Adds the centroid coordinates to the data (X = longitude, Y = latitude)
SD_2019Police_Census$X <- xy$Longitude
SD_2019Police_Census$Y <- xy$Latitude
# Runs the model: a tensor-product smooth te(X, Y) of the coordinates absorbs the spatial trend
PoisGamModel19 <- gam(pcharge_19 ~ medincome + powner + psingm + p15_29 + pnhasn + phisp + pnhblk + poth + pfborn + te(X, Y, k = 5), family = poisson, offset = log_tpop, data = SD_2019Police_Census) # note: a Poisson offset belongs on the natural-log scale; if log_tpop is log10(population), rescale it by log(10)
# Compare the Moran's I p-value for the raw counts with that of the Poisson GAM residuals
moran.mc(SD_2019Police_Census$pcharge_19, pd19w, zero.policy = T, nsim = 999) # raw counts: p = 0.001, as before
##
## Monte-Carlo simulation of Moran I
##
## data: SD_2019Police_Census$pcharge_19
## weights: pd19w
## number of simulations + 1: 1000
##
## statistic = 0.18434, observed rank = 999, p-value = 0.001
## alternative hypothesis: greater
moran.mc(residuals(PoisGamModel19), sd.w.self19, nsim = 999) # p-value rises to ~0.97, so the spatial splines have absorbed the residual autocorrelation
##
## Monte-Carlo simulation of Moran I
##
## data: residuals(PoisGamModel19)
## weights: sd.w.self19
## number of simulations + 1: 1000
##
## statistic = 0.10711, observed rank = 33, p-value = 0.967
## alternative hypothesis: greater
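To see the spatial trend the splines absorbed, mgcv can draw the fitted te(X, Y) surface; a minimal sketch (select = 1 picks the only smooth term in the model):
plot(PoisGamModel19, select = 1, scheme = 2, main = "Fitted spatial trend (2019)")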
###MAPS MAPS MAPS###
# Helper for quantile choropleths; renamed to avoid masking tmap's own tm_style()
quantile_map <- function(shape, col, title) {
  tm_shape(shape, unit = "mi") +
    tm_polygons(col = col, style = "quantile", title = title, palette = "Reds", border.alpha = 0) +
    tm_legend(main.title = NULL, legend.outside = TRUE, frame = FALSE)
}
SanDiegoPC19 <- quantile_map(SD_2019Police_CensusMAPS, "pcharge_19", "Total Police Reports (2019)")
SanDiegoTP <- quantile_map(SD_2019Police_CensusMAPS, "tpop", "Total Population")
SanDiegoM <- quantile_map(SD_2019Police_CensusMAPS, "medincome", "Median Income")
SanDiegoHW <- quantile_map(SD_2019Police_CensusMAPS, "powner", "Percent Homeowner")
SanDiegoD <- quantile_map(SD_2019Police_CensusMAPS, "psingm", "Percent Single Mother")
SanDiegoBC <- quantile_map(SD_2019Police_CensusMAPS, "p15_29", "Percent Aged 15 to 29")
SanDiegoH <- quantile_map(SD_2019Police_CensusMAPS, "phisp", "Percent Hispanic")
SanDiegoA <- quantile_map(SD_2019Police_CensusMAPS, "pnhasn", "Percent Asian")
SanDiegoB <- quantile_map(SD_2019Police_CensusMAPS, "pnhblk", "Percent Black")
SanDiegoFB <- quantile_map(SD_2019Police_CensusMAPS, "pfborn", "Percent Foreign Born")
#Demographic Variables grouped by categories
SanDiegoTP
tmap_arrange(SanDiegoM, SanDiegoHW)
## Legend labels were too wide. The labels have been resized to 0.51, 0.51, 0.51, 0.47, 0.44. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
## Some legend labels were too wide. These labels have been resized to 0.65, 0.59, 0.59, 0.59. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
tmap_arrange(SanDiegoD,SanDiegoBC)
## Some legend labels were too wide. These labels have been resized to 0.65. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
## Legend labels were too wide. The labels have been resized to 0.65, 0.59, 0.59, 0.59, 0.59. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
tmap_arrange(SanDiegoH, SanDiegoA, SanDiegoB)
## Legend labels were too wide. The labels have been resized to 0.34, 0.31, 0.31, 0.31, 0.31. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
## Legend labels were too wide. The labels have been resized to 0.37, 0.37, 0.34, 0.31, 0.31. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
## Legend labels were too wide. The labels have been resized to 0.37, 0.37, 0.37, 0.37, 0.34. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
# Race variables compared to the percent foreign-born population
tmap_arrange(SanDiegoH, SanDiegoFB)
## Legend labels were too wide. The labels have been resized to 0.65, 0.59, 0.59, 0.59, 0.59. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
## Legend labels were too wide. The labels have been resized to 0.65, 0.59, 0.59, 0.59, 0.59. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
tmap_arrange(SanDiegoA, SanDiegoFB)
## Some legend labels were too wide. These labels have been resized to 0.65, 0.59, 0.59. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
## Legend labels were too wide. The labels have been resized to 0.65, 0.59, 0.59, 0.59, 0.59. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
tmap_arrange(SanDiegoB, SanDiegoFB)
## Some legend labels were too wide. These labels have been resized to 0.65. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
## Legend labels were too wide. The labels have been resized to 0.65, 0.59, 0.59, 0.59, 0.59. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
#Pop compared to police reports
tmap_arrange(SanDiegoTP, SanDiegoPC19)
## Legend labels were too wide. The labels have been resized to 0.59, 0.59, 0.59, 0.59, 0.55. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
#Socioeconomic Variables compared to police reports
tmap_arrange(SanDiegoM, SanDiegoPC19)
## Legend labels were too wide. The labels have been resized to 0.51, 0.51, 0.51, 0.47, 0.44. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
tmap_arrange(SanDiegoHW, SanDiegoPC19)
## Some legend labels were too wide. These labels have been resized to 0.65, 0.59, 0.59, 0.59. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
# Social-tie and youth-age variables compared to police reports
tmap_arrange(SanDiegoD, SanDiegoPC19)
## Some legend labels were too wide. These labels have been resized to 0.65. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
tmap_arrange(SanDiegoBC, SanDiegoPC19)
## Legend labels were too wide. The labels have been resized to 0.65, 0.59, 0.59, 0.59, 0.59. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
#Race Variables compared to police reports
tmap_arrange(SanDiegoH, SanDiegoPC19)
## Legend labels were too wide. The labels have been resized to 0.65, 0.59, 0.59, 0.59, 0.59. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
tmap_arrange(SanDiegoA, SanDiegoPC19)
## Some legend labels were too wide. These labels have been resized to 0.65, 0.59, 0.59. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.
tmap_arrange(SanDiegoB, SanDiegoPC19)
## Some legend labels were too wide. These labels have been resized to 0.65. Increase legend.width (argument of tm_layout) to make the legend wider and therefore the labels larger.