Manuscript: Relationship of migratory blue-winged teal to Avian Influenza across the Central United States.

1 Preprocessing

Preprocessing conducted here is the same as that performed for BWTE2.

1.1 Poultry Data

USDA Census Data (1997-2012): https://quickstats.nass.usda.gov/

USDA.df = read.csv("C:/Users/humph173/Documents/LabWork/Patuxent/BWTE_2/BWTE2/Poultry_Census_all.csv", stringsAsFactors=FALSE)

dim(USDA.df)
## [1] 49855    21
head(USDA.df)
##   Program Year     Period Week.Ending Geo.Level   State State.ANSI
## 1  CENSUS 2012 END OF DEC          NA    COUNTY ALABAMA          1
## 2  CENSUS 2012 END OF DEC          NA    COUNTY ALABAMA          1
## 3  CENSUS 2012 END OF DEC          NA    COUNTY ALABAMA          1
## 4  CENSUS 2012 END OF DEC          NA    COUNTY ALABAMA          1
## 5  CENSUS 2012 END OF DEC          NA    COUNTY ALABAMA          1
## 6  CENSUS 2012 END OF DEC          NA    COUNTY ALABAMA          1
##   Ag.District Ag.District.Code  County County.ANSI Zip.Code Region
## 1  BLACK BELT               40 AUTAUGA           1       NA     NA
## 2  BLACK BELT               40  DALLAS          47       NA     NA
## 3  BLACK BELT               40  ELMORE          51       NA     NA
## 4  BLACK BELT               40  ELMORE          51       NA     NA
## 5  BLACK BELT               40  GREENE          63       NA     NA
## 6  BLACK BELT               40    HALE          65       NA     NA
##   watershed_code Watershed Commodity
## 1              0        NA   TURKEYS
## 2              0        NA  CHICKENS
## 3              0        NA  CHICKENS
## 4              0        NA  CHICKENS
## 5              0        NA  CHICKENS
## 6              0        NA  CHICKENS
##                                    Data.Item Domain Domain.Category Value
## 1                        TURKEYS - INVENTORY  TOTAL   NOT SPECIFIED   (D)
## 2             CHICKENS, BROILERS - INVENTORY  TOTAL   NOT SPECIFIED   (D)
## 3             CHICKENS, BROILERS - INVENTORY  TOTAL   NOT SPECIFIED   (D)
## 4 CHICKENS, PULLETS, REPLACEMENT - INVENTORY  TOTAL   NOT SPECIFIED   (D)
## 5             CHICKENS, ROOSTERS - INVENTORY  TOTAL   NOT SPECIFIED   (D)
## 6 CHICKENS, PULLETS, REPLACEMENT - INVENTORY  TOTAL   NOT SPECIFIED   (D)
##   CV....
## 1    (D)
## 2    (D)
## 3    (D)
## 4    (D)
## 5    (D)
## 6    (D)
unique(USDA.df$State)
##  [1] "ALABAMA"        "ALASKA"         "ARIZONA"        "ARKANSAS"      
##  [5] "CALIFORNIA"     "COLORADO"       "CONNECTICUT"    "DELAWARE"      
##  [9] "FLORIDA"        "GEORGIA"        "HAWAII"         "IDAHO"         
## [13] "ILLINOIS"       "INDIANA"        "IOWA"           "KANSAS"        
## [17] "KENTUCKY"       "LOUISIANA"      "MAINE"          "MARYLAND"      
## [21] "MASSACHUSETTS"  "MICHIGAN"       "MINNESOTA"      "MISSISSIPPI"   
## [25] "MISSOURI"       "MONTANA"        "NEBRASKA"       "NEVADA"        
## [29] "NEW HAMPSHIRE"  "NEW JERSEY"     "NEW MEXICO"     "NEW YORK"      
## [33] "NORTH CAROLINA" "NORTH DAKOTA"   "OHIO"           "OKLAHOMA"      
## [37] "OREGON"         "PENNSYLVANIA"   "RHODE ISLAND"   "SOUTH CAROLINA"
## [41] "SOUTH DAKOTA"   "TENNESSEE"      "TEXAS"          "UTAH"          
## [45] "VERMONT"        "VIRGINIA"       "WASHINGTON"     "WEST VIRGINIA" 
## [49] "WISCONSIN"      "WYOMING"
unique(USDA.df$Data.Item)
## [1] "TURKEYS - INVENTORY"                       
## [2] "CHICKENS, BROILERS - INVENTORY"            
## [3] "CHICKENS, PULLETS, REPLACEMENT - INVENTORY"
## [4] "CHICKENS, ROOSTERS - INVENTORY"            
## [5] "DUCKS - INVENTORY"                         
## [6] "CHICKENS, LAYERS - INVENTORY"

Organize poultry data. First, find the total for each county, poultry class, and year. Second, average counties across years. Last, convert each poultry class to a separate column.

#"(D)" marks values withheld by USDA to avoid disclosing individual operations; treated as zero here
USDA.df$Value2 = ifelse(USDA.df$Value == " (D)", 0, USDA.df$Value)
USDA.df$Value2 = as.numeric(gsub(",", "", USDA.df$Value2))

USDA.piv = as.data.frame(
              USDA.df %>%
              group_by(State.ANSI, County.ANSI, Data.Item, Year) %>%
              summarise(InvSum = sum(Value2)) %>%
              group_by(State.ANSI, County.ANSI, Data.Item) %>%
              summarise(InvSum = mean(InvSum)))


USDA.piv$Data.Item = ifelse(USDA.piv$Data.Item == "TURKEYS - INVENTORY", "Turkey",
                      ifelse(USDA.piv$Data.Item == "CHICKENS, BROILERS - INVENTORY", "Broilers",
                        ifelse(USDA.piv$Data.Item == "CHICKENS, PULLETS, REPLACEMENT - INVENTORY", "Pullets",
                          ifelse(USDA.piv$Data.Item == "CHICKENS, ROOSTERS - INVENTORY", "Roosters",
                              ifelse(USDA.piv$Data.Item == "DUCKS - INVENTORY", "Ducks",
                                 ifelse(USDA.piv$Data.Item == "CHICKENS, LAYERS - INVENTORY", "Layers", NA))))))

USDA.piv = dcast(USDA.piv, State.ANSI + County.ANSI ~ Data.Item, value.var = "InvSum", fill=FALSE)

Join to counties using USDA jurisdictional boundaries: https://www.nass.usda.gov/Publications/AgCensus/2012/Online_Resources/Ag_Atlas_Maps/

Counties = readOGR(dsn = "./Bounds", 
                   layer = "CoUS_GCS12", 
                   stringsAsFactors = FALSE)
## OGR data source with driver: ESRI Shapefile 
## Source: "C:\Users\humph173\Documents\LabWork\Patuxent\BWTE_2\BWTE2\Bounds", layer: "CoUS_GCS12"
## with 3070 features
## It has 8 fields
LL84 = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"

Counties = spTransform(Counties, LL84)

Counties$State.ANSI = as.integer(substr(Counties$atlas_stco, 1, 2))
Counties$County.ANSI = Counties$cntyn

#Note: base::merge can reorder rows relative to the polygon slots;
#sp::merge(Counties, USDA.piv, ...) is the order-preserving alternative
Counties@data = merge(Counties@data, 
                       USDA.piv, all.x = TRUE, 
                       by = c("State.ANSI", "County.ANSI"))

Counties@data[is.na(Counties@data)] = 0

#State boundaries for mapping
States = readOGR(dsn = "./Bounds", 
                   layer = "StUS_GCS12", 
                   stringsAsFactors = FALSE)
## OGR data source with driver: ESRI Shapefile 
## Source: "C:\Users\humph173\Documents\LabWork\Patuxent\BWTE_2\BWTE2\Bounds", layer: "StUS_GCS12"
## with 48 features
## It has 7 fields
States$States = as.integer(States$atlas_st)

States = spTransform(States, LL84)

Creating raster and point versions for mapping.

tmp.ras = raster(res=0.2)
extent(tmp.ras) = extent(States)

Domain.r = rasterize(States,
                     tmp.ras,
                     field = 0,
                     background = NA)

Grd.pnt = rasterToPoints(Domain.r, sp = TRUE)

Grd.pnt@data = Grd.pnt@data %>%
               mutate(Long = Grd.pnt@coords[,1],
                      Lat = Grd.pnt@coords[,2]) %>%
               select(-layer)

Poultry Census Maps

USs_df = fortify(States)
## Regions defined for each Polygons
Counties@data$id = rownames(Counties@data)
USc_df = fortify(Counties, region = "id")
USc_df = left_join(USc_df, Counties@data, by = "id")

USc_df$brksB  = cut(USc_df$Broilers, right = F,
                    breaks = c(0, 100, 1000, 10000, 100000, 1000000, 10000000, 40000000),
                    labels = c("[0 - 100)", "[100 - 1 k)", "[1 k - 10 k)", "[10 k - 100 k)", "[100 k - 1 m)",
                               "[1 m - 10 m)", "[10 m - 40 m)"))

Br.plt = ggplot(USc_df, 
        aes(long,lat, group=group, fill = brksB)) + 
        geom_polygon(col="transparent") + 
        geom_polygon(data=USs_df, 
                   aes(long, lat, 
                       group=group), fill="transparent", 
                       col = "white", size=0.5) +
        scale_fill_viridis(name="Broiler", discrete=T, direction = -1) + 
        xlab(" ") +
        ylab("Latitude") +
        scale_x_longitude(xmin=-140, xmax=-50, step=10) +
        scale_y_latitude(ymin=-10, ymax=90, step=5) +
        coord_equal() + 
        theme(panel.grid.minor = element_blank(),
              panel.grid.major = element_blank(),
              panel.background = element_blank(),
              plot.background = element_blank(),
              panel.border = element_blank(),
              axis.text.x = element_blank(),
              axis.ticks.x=element_blank(),
              legend.title = element_text(size=12, face="bold"),
              axis.title.x = element_text(size=16, face="bold", colour="transparent"),
              axis.title.y = element_text(size=16, face="bold"),
              plot.title = element_blank())



USc_df$brksB  = cut(USc_df$Layers, right = F,
                    breaks= c(0, 100, 1000, 10000, 100000, 1000000, 10000000),
                    labels = c("[0 - 100)", "[100 - 1 k)", "[1 k - 10 k)", "[10 k - 100 k)", "[100 k - 1 m)",
                               "[1 m - 10 m)"))

La.plt = ggplot(USc_df, 
        aes(long,lat, group=group, fill = brksB)) + 
        geom_polygon(col="transparent") + 
        geom_polygon(data=USs_df, 
                   aes(long, lat, 
                       group=group), fill="transparent", 
                       col = "white", size=0.5) +
        scale_fill_viridis(name="Layer", discrete=T, direction = -1) + 
        xlab(" ") +
        ylab("Latitude") +
        scale_x_longitude(xmin=-140, xmax=-50, step=10) +
        scale_y_latitude(ymin=-10, ymax=90, step=5) +
        coord_equal() + 
        theme(panel.grid.minor = element_blank(),
              panel.grid.major = element_blank(),
              panel.background = element_blank(),
              plot.background = element_blank(),
              panel.border = element_blank(),
              legend.title = element_text(size=12, face="bold"),
              axis.text.x = element_blank(),
              axis.ticks.x=element_blank(),
              axis.text.y = element_blank(),
              axis.ticks.y=element_blank(),
              axis.title.x = element_text(size=16, face="bold", colour="transparent"),
              axis.title.y = element_text(size=16, face="bold", colour="transparent"),
              plot.title = element_blank())



USc_df$brksB  = cut(USc_df$Pullets, right = F,
                    breaks= c(0, 100, 1000, 10000, 100000, 1000000, 2000000, 3000000),
                    labels = c("[0 - 100)", "[100 - 1 k)", "[1 k - 10 k)", "[10 k - 100 k)", "[100 k - 1 m)",
                               "[1 m - 2 m)", "[2 m - 3 m)"))

Pu.plt = ggplot(USc_df, 
        aes(long,lat, group=group, fill = brksB)) + 
       geom_polygon(col="transparent") + 
        geom_polygon(data=USs_df, 
                   aes(long, lat, 
                       group=group), fill="transparent", 
                       col = "white", size=0.5) +
        scale_fill_viridis(name="Pullet", discrete=T, direction = -1) + 
        xlab(" ") +
        ylab("Latitude") +
        scale_x_longitude(xmin=-140, xmax=-50, step=10) +
        scale_y_latitude(ymin=-10, ymax=90, step=5) +
        coord_equal() + 
        theme(panel.grid.minor = element_blank(),
              panel.grid.major = element_blank(),
              panel.background = element_blank(),
              plot.background = element_blank(),
              panel.border = element_blank(),
              axis.text.x = element_blank(),
              axis.ticks.x=element_blank(),
              legend.title = element_text(size=12, face="bold"),
              axis.title.x = element_text(size=16, face="bold", colour="transparent"),
              axis.title.y = element_text(size=16, face="bold"),
              plot.title = element_blank())

USc_df$brksB  = cut(USc_df$Roosters, right = F,
                    breaks= c(0, 100, 1000, 10000, 100000, 300000),
                    labels = c("[0 - 100)", "[100 - 1 k)", "[1 k - 10 k)", "[10 k - 100 k)", "[100 k - 300 k)"))

Ro.plt = ggplot(USc_df, 
        aes(long,lat, group=group, fill = brksB)) + 
        geom_polygon(col="transparent") + 
        geom_polygon(data=USs_df, 
                   aes(long, lat, 
                       group=group), fill="transparent", 
                       col = "white", size=0.5) +
        scale_fill_viridis(name="Rooster", discrete=T, direction = -1) + 
        xlab("Longitude") +
        ylab("Latitude") +
        scale_x_longitude(xmin=-140, xmax=-50, step=10) +
        scale_y_latitude(ymin=-10, ymax=90, step=5) +
        coord_equal() + 
        theme(panel.grid.minor = element_blank(),
              panel.grid.major = element_blank(),
              panel.background = element_blank(),
              plot.background = element_blank(),
              panel.border = element_blank(),
              legend.title = element_text(size=12, face="bold"),
              axis.text.x = element_blank(),
              axis.ticks.x=element_blank(),
              axis.text.y = element_blank(),
              axis.ticks.y=element_blank(),
              axis.title.x = element_text(size=16, face="bold", colour="transparent"),
              axis.title.y = element_text(size=16, face="bold", colour="transparent"),
              plot.title = element_blank())




USc_df$brksB  = cut(USc_df$Turkey, right = F,
                    breaks= c(0, 100, 1000, 10000, 100000, 1000000, 2000000, 3000000, 4000000),
                    labels = c("[0 - 100)", "[100 - 1 k)", "[1 k - 10 k)", "[10 k - 100 k)", "[100 k - 1 m)",
                               "[1 m - 2 m)", "[2 m - 3 m)", "[3 m - 4 m)"))

Tu.plt = ggplot(USc_df, 
        aes(long,lat, group=group, fill = brksB)) + 
        geom_polygon(col="transparent") + 
        geom_polygon(data=USs_df, 
                   aes(long, lat, 
                       group=group), fill="transparent", 
                       col = "white", size=0.5) +
        scale_fill_viridis(name="Turkey", discrete=T, direction = -1) + 
        xlab(" ") +
        ylab("Latitude") +
        scale_x_longitude(xmin=-140, xmax=-50, step=10) +
        scale_y_latitude(ymin=-10, ymax=90, step=5) +
        coord_equal() + 
        theme(panel.grid.minor = element_blank(),
              panel.grid.major = element_blank(),
              panel.background = element_blank(),
              plot.background = element_blank(),
              panel.border = element_blank(),
              #axis.text.x = element_blank(),
              axis.ticks.x=element_blank(),
              legend.title = element_text(size=12, face="bold"),
              axis.title.x = element_text(size=16, face="bold"),
              axis.title.y = element_text(size=16, face="bold"),
              plot.title = element_blank())



USc_df$brksB  = cut(USc_df$Ducks, right = F,
                    breaks= c(0, 100, 1000, 10000, 100000, 1000000),
                    labels = c("[0 - 100)", "[100 - 1 k)", "[1 k - 10 k)", "[10 k - 100 k)", "[100 k - 1 m)"))

du.plt = ggplot(USc_df, 
        aes(long,lat, group=group, fill = brksB)) + 
        geom_polygon(col="transparent") + 
        geom_polygon(data=USs_df, 
                   aes(long, lat, 
                       group=group), fill="transparent", 
                       col = "white", size=0.5) +
        scale_fill_viridis(name="Duck", discrete=T, direction = -1) + 
        xlab("Longitude") +
        ylab(" ") +
        scale_x_longitude(xmin=-140, xmax=-50, step=10) +
        scale_y_latitude(ymin=-10, ymax=90, step=5) +
        coord_equal() + 
         theme(panel.grid.minor = element_blank(),
              panel.grid.major = element_blank(),
              panel.background = element_blank(),
              plot.background = element_blank(),
              panel.border = element_blank(),
              legend.title = element_text(size=12, face="bold"),
              #axis.text.x = element_blank(),
              #axis.ticks.x=element_blank(),
              axis.text.y = element_blank(),
              axis.ticks.y=element_blank(),
              axis.title.x = element_text(size=16, face="bold"),
              axis.title.y = element_text(size=16, face="bold", colour="transparent"),
              plot.title = element_blank())


grid.arrange(Br.plt, La.plt, Pu.plt, Ro.plt, Tu.plt, du.plt, ncol = 2)

1.2 ARGOS Telemetry Data

Removing “dead” locations.
Selecting “plausible” records from the DAF_Filter.
Formatting dates, adding local time (America/Chicago), and deriving hour, day, week, month, year, and season.

Argos_sp = read.csv("C:/Users/humph173/Documents/LabWork/Patuxent/Douglas_080218/BWTeal_2013-2017_Ramey_argos_tabular_diag_gis_filtered.csv",
                    stringsAsFactors=FALSE,
                    header = TRUE, sep=",")[,1:35] %>% 
                    filter(Fate == "alive",
                           DAF_Filter == 0) %>%
                    mutate(ID = Animal_ID,
                           ObsDate = as.POSIXct(Location_Datetime_GMT, 
                                                tz = "GMT", 
                                                format = "%Y/%m/%d %H:%M:%S"),
                           ObsDateD = as.Date(Location_Datetime_GMT),
                           LocT = ymd_hms(format(ObsDate, tz="America/Chicago", usetz=TRUE)),
                           LocHR = hour(LocT),
                           LocDY = day(LocT),
                           LocWK = week(LocT),
                           LocMN = month(LocT),
                           LocYR = year(LocT))

#February is split mid-month; LocDY (day of month) was derived above
Argos_sp$Season = ifelse(Argos_sp$LocMN == 12 | Argos_sp$LocMN == 1, "Winter",
                      ifelse(Argos_sp$LocMN == 3, "Spring",
                             ifelse(Argos_sp$LocMN >= 4 & Argos_sp$LocMN <= 8, "Breeding",
                                    ifelse(Argos_sp$LocMN >= 9 & Argos_sp$LocMN <= 11, "Fall",
                                           ifelse(Argos_sp$LocMN == 2 & Argos_sp$LocDY <= 15, "Winter",
                                                  ifelse(Argos_sp$LocMN == 2 & Argos_sp$LocDY >= 16, "Spring",
                                                         NA))))))
#Convert to points
Locs = cbind(Argos_sp$Longitude, Argos_sp$Latitude)

Sp.pnts = SpatialPointsDataFrame(Locs, Argos_sp)
proj4string(Sp.pnts) = proj4string(States)

Sp.pnts$US = is.na(over(Sp.pnts, States))[,1]

Sp.pnts = subset(Sp.pnts, US == "FALSE")

nProj = "+proj=merc +a=6378137 +b=6378137 +lat_ts=0 +lon_0=0
         +x_0=0 +y_0=0 +k=1 +units=km +nadgrids=@null +no_defs"

Sp.pntsp = spTransform(Sp.pnts, nProj)

Countiesp = spTransform(Counties, nProj)

Statesp = spTransform(States, nProj)

2 Estimate Migration Timing

Later used to filter data.

MTimes = Sp.pnts@data

MTimes$DOY = yday(MTimes$LocT)

MTimes.df = as.data.frame(
                    MTimes %>%
                      group_by(DOY) %>%
                      summarise(Med = median(Latitude)))

MTimes.df$lag1 = lag(MTimes.df$Med)
MTimes.df$Ldiff = MTimes.df$Med - MTimes.df$lag1

#Rolling Median
RollM = rollmedianr(MTimes.df$Med, k=5, align = "right")
RollM2 = lag(RollM)

Roll.df = as.data.frame(cbind(RollM, RollM2))
Roll.df$Diff = Roll.df$RollM - Roll.df$RollM2

#Days
#76 = March 17
#165 = June 14
#250 = Sept 7
#334 = Nov 30

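The breakpoints listed above were read off the rolling median; a minimal sketch of one way to flag candidate change points from the day-to-day differences (an illustrative heuristic, not the procedure used to pick 76/165/250/334):

#Days where the sign of the rolling-median difference flips are
#candidate season boundaries (inspect against the figure below)
SignFlip = which(diff(sign(Roll.df$Diff)) != 0)
head(SignFlip)
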
Figure: Rolling median of marked-bird latitude by day of year; used to define the seasons that filter the data.

MRanges = data.frame(
                from=c(1, 76, 165, 250, 334), 
                to=c(76,165, 250, 334, 365),
                Season = as.factor(c("Overwinter", "Spring", "Breeding", "Fall", "Overwinter")))


MTimes.df$RollMed = rollmedianr(MTimes.df$Med, k=7, align = "left")[1:length(MTimes.df$Med)] #index pads the shorter rolling series to full length with trailing NAs

levels(MRanges$Season) 
## [1] "Breeding"   "Fall"       "Overwinter" "Spring"
MRanges$Season <- ordered(MRanges$Season, levels = c("Overwinter", "Spring", "Breeding", "Fall"))

myCol = c("darkgreen", "tan", "darkred", "gray")
names(myCol) = levels(factor(MRanges$Season))

colScale = scale_fill_manual(name = "Season", values = myCol, 
                               labels = c("Winter", "Spring", "Breeding", "Fall"))
                               

Lplot = ggplot() + geom_rect(data = MRanges, 
                         aes(xmin = from, xmax = to, 
                             ymin = -Inf, ymax = Inf, 
                             fill = Season), alpha=0.2) +
                         colScale



Lplot + geom_point(data =MTimes.df, aes(DOY, Med), shape=1, size = 1.5, col="black") +
              geom_smooth(data =MTimes.df, aes(DOY, RollMed),
                          span = 0.5, method = "loess",
                          se = F,
                          col="black",
                          size = 1.2,
                          linetype = "dashed") +
              geom_vline(xintercept=76) + #1.5
              geom_vline(xintercept=165) +
              geom_vline(xintercept=250) +
              geom_vline(xintercept=334) +
         xlab("Day of Year ") +
         ylab("Latitude (median)") +  
         theme_classic() +
         scale_x_continuous(breaks = c(0, MRanges$from, 365), 
                            limits =c(1,365),
                            labels = c("", 1,  76, 165, 250, 334, 365)) +
            theme(plot.title = element_text(hjust = 0.5),
                  legend.position = "bottom",
                  legend.title=element_text(size=22), 
                  legend.text=element_text(size=18),
                  panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank(),
                  strip.background = element_blank(),
                  title = element_text(face="bold", size=18, hjust=0.5),
                  axis.text=element_text(size=16),
                  strip.text = element_text(face="bold", size = 20),
                  axis.title.y = element_text(face="bold", size = 24),
                  axis.text.x = element_text(face="bold", size=14, vjust=0.5, 
                                             hjust=0.5, angle=0),
                  axis.title.x = element_text(face="bold", size = 24))

2.1 AI Outbreaks and Surveillance

Load EMPRES data and compare to migration timing.

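CleanEMPRES() and CleanEMPRES2() are helper functions carried over from the BWTE2 workflow and are not shown here. A minimal sketch of what CleanEMPRES is assumed to do, inferred from the columns used downstream (the EMPRES-i field names in the sketch are assumptions):

#Hypothetical reconstruction: read EMPRES-i CSV exports from a folder,
#standardize the columns used below, and assign the result by name
CleanEMPRES = function(dir, out.name){
  files = list.files(dir, pattern = "\\.csv$", full.names = TRUE)
  emp = do.call(rbind, lapply(files, read.csv, stringsAsFactors = FALSE))
  emp = data.frame(Lat = as.numeric(emp$latitude),     #assumed field names
                   Long = as.numeric(emp$longitude),
                   AI = emp$serotype,
                   ObsDate = as.Date(emp$observationDate),
                   localityQuality = emp$localityQuality)
  emp$Week = as.integer(strftime(emp$ObsDate, format = "%V"))
  assign(out.name, emp, envir = .GlobalEnv)
}
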
CleanEMPRES("C:/Users/humph173/Documents/LabWork/Patuxent/EMPRES", "EMPRES.cln")
EMPRES.cln = as.data.frame(EMPRES.cln) %>% select(-localityQuality)

CleanEMPRES2("C:/Users/humph173/Documents/LabWork/Patuxent/EMPRESv", "EMPRES2.cln") 
EMPRES2.cln = as.data.frame(EMPRES2.cln)

EMPRES.cln = EMPRES.cln %>% select(Lat, Long, AI, Week, ObsDate)
EMPRES2.cln = EMPRES2.cln %>% select(Lat, Long, AI, Week, ObsDate)

EMPRES.cln = rbind(EMPRES.cln, EMPRES2.cln)

EMPRES.cln$H3 = grepl("H3", EMPRES.cln$AI)
EMPRES.cln$H4 = grepl("H4", EMPRES.cln$AI)
EMPRES.cln$H5 = grepl("H5", EMPRES.cln$AI)
EMPRES.cln$H7 = grepl("H7", EMPRES.cln$AI)


E.df = EMPRES.cln %>%
          filter(H3 == TRUE |
                 H4 == TRUE |
                 H5 == TRUE |
                 H7 == TRUE) %>%
          select(Long, Lat, H3, H4, H5, H7, AI, Week, ObsDate)

OutBrk.pnts = SpatialPointsDataFrame(E.df[,c("Long", "Lat")], E.df)
proj4string(OutBrk.pnts) = proj4string(Sp.pnts)

OutBrk.pnts$Domain = is.na(over(OutBrk.pnts, States))[,1]
OutBrk.pnts = subset(OutBrk.pnts, Domain == "FALSE")

OutBrk.pnts$Hem = ifelse(OutBrk.pnts$H3 == TRUE, "H3",
                    ifelse(OutBrk.pnts$H4 == TRUE, "H4",
                        ifelse(OutBrk.pnts$H5 == TRUE, "H5",
                            ifelse(OutBrk.pnts$H7 == TRUE, "H7", NA))))
  
  
OutBrk.pnts@data = OutBrk.pnts@data %>%
                    mutate(Source = "EMPRES",
                           DOY = yday(ObsDate)) %>%
                    select(Long, Lat, DOY, Week, Hem, Source)

Load AI surveillance (IRD) data.

IRD.df = read.csv("./IRD/IRD_BWTE_021219.csv", stringsAsFactors=FALSE)

IRD2.df = IRD.df %>%
            mutate(Long = as.numeric(Longitude),
                   Lat = as.numeric(Latitude),
                   ObsDate = ymd(Collection.Date),
                   Year = as.integer(substr(ObsDate,0, 4)),
                   Month = as.integer(format(ObsDate, "%m")),
                   Week = as.integer(strftime(ObsDate, format = "%V")),
                   Day = as.integer(format(ObsDate, "%d")),
                   AI = Subtype,
                   Test = Flu.Test.Status) %>%
            filter(is.na(Long) == FALSE,
                   is.na(Lat) == FALSE,
                   is.na(Collection.Date) == FALSE,
                   Country == "USA") %>%
            select(Long, Lat, AI, ObsDate, Year, Month, Week, Day, Test)

range(IRD2.df$Year)
## [1] 1986 2018
Em.pnts = SpatialPointsDataFrame(IRD2.df[,c("Long", "Lat")], IRD2.df)
proj4string(Em.pnts) = proj4string(States)

Surveillance Counts Panel

Panel.week = Em.pnts@data

Panel.week$Hem = substr(Panel.week$AI, 1, 2) #two-character prefix would misread H10+ subtypes as H1; only H1-H9 occur here (see unique() below)

Panel.week = Panel.week %>%
             filter(Hem != "-N" & Hem != "Mi" & Hem != "Hx")

Panel.week = as.data.frame(
              Panel.week %>%
              group_by(Week, Hem) %>%
              summarise(Count = length(Hem)))

unique(Panel.week$Hem)
## [1] "H2" "H7" "H1" "H3" "H5" "H8" "H4" "H6" "H9"
Panel.week %>%
  group_by(Hem) %>%
  summarise(Count = sum(Count))
## # A tibble: 9 x 2
##   Hem   Count
##   <chr> <int>
## 1 H1       82
## 2 H2       14
## 3 H3      283
## 4 H4      554
## 5 H5        4
## 6 H6       19
## 7 H7       65
## 8 H8        1
## 9 H9        3
df2 = ddply(Panel.week, .(Hem), transform, percent = Count/sum(Count) * 100)

df2 = df2 %>% filter(Hem == "H3" | Hem == "H4" | Hem == "H5" | Hem == "H7")

ggplot(df2, aes(Week, percent)) + 
                 geom_bar(stat="identity", fill = "darkgray") +
                 facet_grid(rows = vars(df2$Hem)) +
                  #coord_flip() +
                      theme_classic() +
                       xlab("Week of Year ") +
                       ylab("Sample (%)") +
                       xlim(1, 52) +
                        theme(plot.title = element_text(hjust = 0.5),
                              legend.position = "bottom",
                              legend.title=element_text(size=22), 
                              legend.text=element_text(size=18),
                              #panel.grid.major = element_blank(),
                              #panel.grid.minor = element_blank(),
                              strip.background = element_blank(),
                              #panel.border = element_rect(colour = "black"),
                              title = element_text(face="bold", size=22, hjust=0.5),
                              strip.text = element_text(face="bold", size = 22),
                              axis.title.y = element_text(face="bold", size = 24),
                              axis.text.x = element_text(face="bold", size=16, vjust=0.5, 
                                                         hjust=0.5, angle=0),
                              axis.text.y = element_text(face="bold", size=20),
                              axis.title.x = element_text(face="bold", size = 22))

Isolates Table

#H3, H4, H5, H7
IRD.set = Em.pnts@data

IRD.set$Hem = substr(IRD.set$AI, 1, 2)

IRD.set$Dups = duplicated(IRD.set[,c("Lat","Long", "Hem")])

#Note: & binds tighter than |, so the duplicate condition applies to H7 only;
#H3, H4, and H5 records are kept regardless of the Dups flag
IRD.set = IRD.set %>%
            filter(Hem == "H3" | Hem == "H4" | Hem == "H5" |
                   (Hem == "H7" & Dups == TRUE))

IRD.set = IRD.set %>%
          mutate(Source = "IRD",
                 DOY = yday(ObsDate)) %>%
          select(Long, Lat, DOY, Week, Hem, Source)

IRD.set = rbind(IRD.set, OutBrk.pnts@data)

MRanges
##   from  to     Season
## 1    1  76 Overwinter
## 2   76 165     Spring
## 3  165 250   Breeding
## 4  250 334       Fall
## 5  334 365 Overwinter
IRD.set$Period = ifelse(IRD.set$DOY >= 1 & IRD.set$DOY < 76, "Winter",
                        ifelse(IRD.set$DOY >= 76 & IRD.set$DOY < 165, "Spring",
                            ifelse(IRD.set$DOY >= 165 & IRD.set$DOY < 250, "Breeding",
                                ifelse(IRD.set$DOY >= 250 & IRD.set$DOY < 334, "Fall",
                                    ifelse(IRD.set$DOY >= 334 & IRD.set$DOY <= 365, "Winter", NA)))))
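
#An equivalent, more compact period assignment uses cut(); duplicated
#labels map both tails of the year to "Winter" (illustrative alternative)
IRD.set$Period = as.character(cut(IRD.set$DOY,
                   breaks = c(1, 76, 165, 250, 334, 366),
                   labels = c("Winter", "Spring", "Breeding", "Fall", "Winter"),
                   right = FALSE, include.lowest = TRUE))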



IRD.set %>%
  group_by(Period, Source, Hem) %>%
  summarise(Count = length(Hem))
## # A tibble: 17 x 4
## # Groups:   Period, Source [8]
##    Period   Source Hem   Count
##    <chr>    <chr>  <chr> <int>
##  1 Breeding EMPRES H5        1
##  2 Breeding EMPRES H7        3
##  3 Breeding IRD    H3       51
##  4 Breeding IRD    H4      104
##  5 Fall     EMPRES H7        1
##  6 Fall     IRD    H3      230
##  7 Fall     IRD    H4      450
##  8 Fall     IRD    H5        3
##  9 Fall     IRD    H7        2
## 10 Spring   EMPRES H5        7
## 11 Spring   EMPRES H7        4
## 12 Spring   IRD    H3        2
## 13 Spring   IRD    H5        1
## 14 Spring   IRD    H7       49
## 15 Winter   EMPRES H5       13
## 16 Winter   EMPRES H7       14
## 17 Winter   IRD    H7        2
ReadTab = read.csv("./IsoTable.csv")[1:5,1:6]

kable(ReadTab, 
      caption = "Isolates Table") %>%
      kable_styling("striped", full_width = F) %>%
      row_spec(0, font_size = 20) %>%
      column_spec(1, bold = T)
Isolates Table

Season      H3    H4    H5        H7        Total
Winter       0     0    13 (13)   16 (14)      29
Spring       2     0     8 (7)    53 (4)       63
Breeding    51   104     1 (1)     3 (3)      159
Fall       230   450     3         3 (1)      686
Total      283   554    25        75          937

Isolates by Migration Timing and Latitude

Plot.df = Sp.pnts@data
Plot.df$DOY = yday(Plot.df$LocT)

Qk.frm = as.data.frame(
            Plot.df %>%
              group_by(Animal_ID, DOY) %>%
              summarise(Mean = median(Latitude))) #median latitude per bird-day; column named Mean for plotting



Iso.frm = as.data.frame(
            Plot.df %>%
              group_by(LocWK) %>%
              summarise(Max = median(Latitude)))

Qk.frm$Period = ifelse(Qk.frm$DOY >= 1 & Qk.frm$DOY < 76, "Winter",
                        ifelse(Qk.frm$DOY >= 76 & Qk.frm$DOY < 165, "Spring",
                            ifelse(Qk.frm$DOY >= 165 & Qk.frm$DOY < 250, "Breeding",
                                ifelse(Qk.frm$DOY >= 250 & Qk.frm$DOY < 334, "Fall",
                                    ifelse(Qk.frm$DOY >= 334 & Qk.frm$DOY <= 365, "Winter", NA)))))


IRD.set.plt = IRD.set
IRD.set.plt$Dups = duplicated(IRD.set.plt[, c("Lat", "Long", "Hem", "Source", "DOY")])
IRD.set.plt = IRD.set.plt %>% filter(Dups == FALSE)

myShapes = c(0:3)
names(myShapes) = levels(factor(IRD.set.plt$Hem))

ShpScale = scale_shape_manual(name = "Isolate", values = myShapes,
                             labels = c("H3", "H4", "H5", "H7"))

colScale = scale_colour_manual(name = "     ", values = c("red", "black"), 
                               labels = c("Outbreak", "Surveillance"))

ggplot(Qk.frm, aes(DOY, Mean)) +
      geom_point(data = Qk.frm, 
                  aes(DOY, Mean),
                  shape = 19,
                  size = 6,
                  alpha = 0.1,
                  #fill = "dodgerblue4",
                  col = "darkgray",
                  stroke = 0,
                  position = position_jitter(w = 0, h = 0.25)) +
      geom_point(data = IRD.set.plt,
                 aes(DOY, Lat, 
                 shape=factor(IRD.set.plt$Hem), 
                 col = factor(IRD.set.plt$Source)), 
                 size = 3,
                 stroke = 0.9,
                 position = position_jitter(w = 0.5, h = 0.5)) +
      ShpScale +
      colScale +
      scale_y_continuous(breaks = scales::pretty_breaks(8), limits = c(25, NA)) +
      theme_classic() +
      ylab("Latitude (median)") +
      xlab("Day of Year") +
      scale_x_continuous(breaks = seq(0, 365, 73), 
                            limits =c(1,365),
                            labels = c( 1, seq(73, 365, 73))) +
      theme(plot.title = element_text(hjust = 0.5),
            legend.position = "bottom", #c(0.15, 0.7),
            legend.title=element_text(size=22), 
            legend.text=element_text(size=18),
            panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(),
            strip.background = element_blank(),
            #panel.border = element_rect(colour = "black"),
            title = element_text(face="bold", size=18, hjust=0.5),
            axis.text=element_text(size=18),
            strip.text = element_text(face="bold", size = 20),
            axis.title.y = element_text(face="bold", size = 24),
            axis.text.x = element_text(face="bold", size=14, vjust=0.5, 
                                       hjust=0.5, angle=0),
            axis.title.x = element_text(face="bold", size = 24)) 

Isolates by Migration Timing and Latitude (Panel Version)

Qk.frmp1 = Qk.frmp2 = Qk.frmp3 = Qk.frmp = Qk.frm
Qk.frmp$Hem = "H3"
Qk.frmp1$Hem = "H4"
Qk.frmp2$Hem = "H5"
Qk.frmp3$Hem = "H7"

Qk.frmp = rbind(Qk.frmp, Qk.frmp1, Qk.frmp2, Qk.frmp3)

ggplot(Qk.frmp, aes(DOY, Mean)) +
      geom_point(data = Qk.frmp, 
                  aes(DOY, Mean),
                  shape = 19,
                  size = 6,
                  alpha = 0.1,
                  #fill = "dodgerblue4",
                  col = "darkgray",
                  stroke = 0,
                  position = position_jitter(w = 0, h = 0.25)) +
      geom_point(data = IRD.set.plt,
                 aes(DOY, Lat, 
                 shape=factor(IRD.set.plt$Hem), 
                 col = factor(IRD.set.plt$Source)), 
                 size = 3,
                 stroke = 0.9,
                 position = position_jitter(w = 0.5, h = 0.5)) +
      #geom_density_2d(data = IRD.set.plt,
      #                  aes(DOY, Lat)) +
      ShpScale +
      colScale +
      #scale_colour_manual(values=c("red","blue")) +
      facet_wrap(~Hem, ncol=2, scales = "free") +
      scale_y_continuous(breaks = scales::pretty_breaks(8), limits = c(25, NA)) +
      theme_classic() +
      ylab("Latitude (median)") +
      xlab("Day of Year") +
      scale_x_continuous(breaks = seq(0, 365, 73), 
                            limits =c(1,365),
                            labels = c( 1, seq(73, 365, 73))) +
      theme(plot.title = element_text(hjust = 0.5),
            legend.position = "bottom", #c(0.15, 0.7),
            legend.title=element_text(size=22), 
            legend.text=element_text(size=18),
            panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(),
            strip.background = element_blank(),
            #panel.border = element_rect(colour = "black"),
            title = element_text(face="bold", size=18, hjust=0.5),
            axis.text=element_text(size=18),
            strip.text = element_text(face="bold", size = 20),
            axis.title.y = element_text(face="bold", size = 24),
            axis.text.x = element_text(face="bold", size=14, vjust=0.5, 
                                       hjust=0.5, angle=0),
            axis.title.x = element_text(face="bold", size = 24))

Isolates by Migration Timing and Latitude (Map Version)

wmap_df = fortify(States)
## Regions defined for each Polygons
wmap_df1 = wmap_df2 = wmap_df3 = wmap_df

wmap_df$Period = "Fall"
wmap_df1$Period = "Spring"
wmap_df2$Period = "Breeding"
wmap_df3$Period = "Winter"

wmap_df = rbind(wmap_df, wmap_df1, wmap_df2, wmap_df3)
wmap_df$Period = as.factor(wmap_df$Period)

wmap_df$Period = ordered(wmap_df$Period, levels = c("Winter", "Spring", "Breeding", "Fall"))

tmp.df = IRD.set 
tmp.df$Period = as.factor(tmp.df$Period)
tmp.df$Period = ordered(tmp.df$Period, levels = c("Winter", "Spring", "Breeding", "Fall"))

tmp.df$Dups = duplicated(tmp.df[,c("Long","Lat","Period", "Hem", "Source", "Week")])
tmp.df = tmp.df %>% filter(Dups == FALSE)

ggplot(wmap_df, aes(long,lat, group=group)) + 
        geom_polygon(fill = "lightgray", col="darkgray") + 
        geom_point(data = tmp.df,
                 aes(Long, Lat, group = NA,
                 shape=factor(tmp.df$Hem), 
                 col = factor(tmp.df$Source)), 
                 size = 3,
                 stroke = 0.9,
                 position = position_jitter(w = 1, h = 1)) +
        ShpScale +
        colScale +
        facet_wrap(~Period, ncol = 2) +
        xlab("Longitude") +
        ylab("Latitude") +
        coord_equal() + 
        theme_classic() +
        theme(plot.title = element_text(hjust = 0.5),
                              legend.position = "bottom",
                              legend.title=element_text(size=22), 
                              legend.text=element_text(size=18),
                              #panel.grid.major = element_blank(),
                              #panel.grid.minor = element_blank(),
                              strip.background = element_blank(),
                              #panel.border = element_rect(colour = "black"),
                              title = element_text(face="bold", size=22, hjust=0.5),
                              strip.text = element_text(face="bold", size = 22),
                              axis.title.y = element_text(face="bold", size = 24),
                              axis.text.x = element_text(face="bold", size=18, vjust=0.5, 
                                                         hjust=0.5, angle=0),
                              axis.text.y = element_text(face="bold", size=18),
                              axis.title.x = element_text(face="bold", size = 22))

3 Utilization Distributions

Construct Utilization Distributions (UDs) for Spring and Fall Migrations

3.1 Spring Migration UD

Prep Data.

MProj = "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs"

Statesplt = spTransform(States, MProj)

MoveBirds = spTransform(Sp.pnts, MProj)
MoveBirds$DOY = yday(MoveBirds$LocT)


tmp.ras = raster(res=0.71)
extent(tmp.ras) = extent(States)

UD.r = rasterize(States,
                     tmp.ras,
                     field = 0,
                     background = NA)

UD.r1 = projectRaster(UD.r, crs=CRS(proj4string(MoveBirds)))
UD.r = extend(UD.r1, c(200,200))

MRanges 
##   from  to     Season
## 1    1  76 Overwinter
## 2   76 165     Spring
## 3  165 250   Breeding
## 4  250 334       Fall
## 5  334 365 Overwinter
MoveBirds.df = MoveBirds@data %>%
                mutate(Long = MoveBirds@coords[,1],
                       Lat = MoveBirds@coords[,2],
                       Type = "gps",
                       Error = Error_Radius) %>%
                filter(DOY >= 76 & DOY <= 165) %>% #Filter to spring
                select(Animal_ID, Long, Lat, LocT, Type, Error)

MoveBirds.df$Duplicated = duplicated(MoveBirds.df[, c("Animal_ID", "LocT")])

MoveBirds.df = MoveBirds.df %>% filter(Duplicated == FALSE)

getDuplicatedTimestamps(x=as.factor(MoveBirds.df$Animal_ID),
                        timestamps=MoveBirds.df$LocT,
                        sensorType = MoveBirds.df$Type)
## NULL
MoveBirds.df = arrange(MoveBirds.df, Animal_ID)

IDCount = as.data.frame(
              MoveBirds.df %>%
                group_by(Animal_ID) %>%
                summarise(Count = length(Animal_ID)))

Loop through individual birds, computing a UD for each and saving it to a folder.

Bird.lvls = levels(factor(MoveBirds.df$Animal_ID)) #Birds

for(i in 1:length(Bird.lvls)){

    MoveBirds.df2 = MoveBirds.df %>% filter(Animal_ID == Bird.lvls[i])

    Chk.pnts = subset(MoveBirds, Animal_ID == Bird.lvls[i])
    
    Fixes = length(MoveBirds.df2$Animal_ID)
    
    if(Fixes < 31){next} #Check that we have enough records
      else{
        
         Duration.m = as.numeric(max(MoveBirds.df2$LocT) - min(MoveBirds.df2$LocT)) #total duration for bird
          
         BirdM = move(x=MoveBirds.df2$Long, #Format for the move package
                  y=MoveBirds.df2$Lat, 
                  time=MoveBirds.df2$LocT,
                  proj=CRS(proj4string(MoveBirds)),
                  data=MoveBirds.df2,  
                  animal=factor(MoveBirds.df2$Animal_ID))
      
          dbbmm1 = brownian.bridge.dyn(object=BirdM,
                                       raster = UD.r,
                                       location.error= MoveBirds.df2$Error,
                                       time.step=15,
                                       window.size=31, #Median sampling @ 48min.  30*48 = 1 day, must be odd
                                       margin=11) #1/2 of window, shifted down to be odd #Kranstauber et al 2012
          
          writeRaster(dbbmm1, paste("./UD_rasters/", Bird.lvls[i], ".tif", sep=""), overwrite=TRUE)
          
          Temp.rast = raster(paste("./UD_rasters/", Bird.lvls[i], ".tif", sep=""))
          
          values(Temp.rast) = Scale(values(Temp.rast))
          
          Temp.rast = Temp.rast*Duration.m
          
          writeRaster(Temp.rast, paste("./UD_rasters/", Bird.lvls[i], ".tif", sep=""), overwrite=TRUE)
          
      }
    
}

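Scale() is a helper carried over from the BWTE2 preprocessing and is not defined in this section; a minimal sketch, assuming simple min-max rescaling to [0, 1]:

#Hypothetical reconstruction of Scale(): min-max rescale, ignoring NAs
Scale = function(x){
  (x - min(x, na.rm = TRUE)) / (max(x, na.rm = TRUE) - min(x, na.rm = TRUE))
}
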
Sum and scale individual bird UDs

#Read individual bird UD rasters
UDfiles = list.files(path="./UD_rasters", 
                     pattern="*.tif", full.names=T, recursive=T)

#sum all birds 
UD.stk = stack(UDfiles)
UD.sum = sum(UD.stk)

#map extent
UD.sum = crop(UD.sum, extent(UD.r1))
UD.sum = UD.sum + UD.r1

UD.sum = projectRaster(UD.sum, crs=CRS(proj4string(States)))

#Scale to 1
UD.sum = disaggregate(UD.sum, fact = 5, method="bilinear")
values(UD.sum) = Scale(values(UD.sum))

UD.sum.cont = UD.sum

ValVect = values(UD.sum)
ValVect[ValVect == 0] = NA

Quantiles = quantile(ValVect, probs = c(0.5,0.75,0.99), na.rm=T)

#Thresholds
Quant1 = quantile(ValVect, probs = 0.5, na.rm=T)
Quant2 = quantile(ValVect, probs = 0.75, na.rm=T)
Quant3 = quantile(ValVect, probs = 0.99, na.rm=T)

Contours = rasterToContour(UD.sum, levels=Quantiles)
Contour05 = rasterToContour(UD.sum, levels=Quant3)

values(UD.sum) = ifelse(values(UD.sum) < Quant1, 1,                                   #1 = background
                    ifelse(values(UD.sum) >= Quant1 & values(UD.sum) < Quant2, 2,     #2 = corridor
                        ifelse(values(UD.sum) >= Quant2 & values(UD.sum) < Quant3, 3, #3 = movement
                            ifelse(values(UD.sum) >= Quant3, 4, NA))))                #4 = stopover


IRD.set %>%
  group_by(Period) %>%
  summarise(Cnt = length(Period))
## # A tibble: 4 x 2
##   Period     Cnt
##   <chr>    <int>
## 1 Breeding   159
## 2 Fall       686
## 3 Spring      63
## 4 Winter      29
#View
mCols = c("#A50026", "#D73027", "#F46D43", "#FDAE61", "#E0F3F8", "#ABD9E9", "#74ADD1")

mCols = c("yellow2", "orange", "red")

rng = seq(1, 4, 1)

SpringLC.test = UD.sum
UD.sumX = UD.sum
UD.sumX[UD.sumX == 1] = NA

#cr = colorRampPalette(c(rev(mCols)),  
 #                       bias = 1, space = "rgb")
levelplot(UD.sumX, 
           margin = FALSE,
           xlab = NULL, 
           ylab = NULL, 
           maxpixels = 1e5,
           col.regions = mCols, at = rng,
           colorkey = list(labels=list(at=c(1.5, 2.5, 3.5),  
                               labels=c("Corridor", "Movement", "Stopover"), cex=1.5),
                               labels=list(cex=12),
                               space = "bottom"), 
                               par.strip.text = list(fontface='bold', cex=1),
           par.settings = list(axis.line = list(col = "black"),
                               strip.background = list(col = 'transparent'), 
                               strip.border = list(col = 'transparent')),
                               scales = list(cex = 1.25))  +
           latticeExtra::layer(sp.polygons(States, col = "darkgray", lwd = 0.5)) 

3.2 Fall Migration UD

Prep data (same as before, but with the fall filter).

#MRanges 
MoveBirds.df = MoveBirds@data %>%
                mutate(Long = MoveBirds@coords[,1],
                       Lat = MoveBirds@coords[,2],
                       Type = "gps",
                       Error = Error_Radius) %>%
                filter(DOY >= 250 & DOY <= 334) %>% #Filter to fall
                select(Animal_ID, Long, Lat, LocT, Type, Error)

MoveBirds.df$Duplicated = duplicated(MoveBirds.df[, c("Animal_ID", "LocT")])

MoveBirds.df = MoveBirds.df %>% filter(Duplicated == FALSE)

getDuplicatedTimestamps(x=as.factor(MoveBirds.df$Animal_ID),
                        timestamps=MoveBirds.df$LocT,
                        sensorType = MoveBirds.df$Type)
## NULL
MoveBirds.df = arrange(MoveBirds.df, Animal_ID)

IDCount = as.data.frame(
              MoveBirds.df %>%
                group_by(Animal_ID) %>%
                summarise(Count = length(Animal_ID)))

Loop through individual birds, computing a UD for each and saving it to a folder.

Bird.lvls = levels(factor(MoveBirds.df$Animal_ID))

for(i in 1:length(Bird.lvls)){

    MoveBirds.df2 = MoveBirds.df %>% filter(Animal_ID == Bird.lvls[i])
    
    Chk.pnts = subset(MoveBirds, Animal_ID == Bird.lvls[i])
    
    Fixes = length(MoveBirds.df2$Animal_ID)
    
    if(Fixes < 31){next}
      else{
        
         Duration.m = as.numeric(max(MoveBirds.df2$LocT) - min(MoveBirds.df2$LocT))
          
         BirdM = move(x=MoveBirds.df2$Long, 
                  y=MoveBirds.df2$Lat, 
                  time=MoveBirds.df2$LocT,
                  proj=CRS(proj4string(MoveBirds)),
                  data=MoveBirds.df2,  
                  animal=factor(MoveBirds.df2$Animal_ID))
      
          dbbmm1 = brownian.bridge.dyn(object=BirdM,
                                       raster = UD.r,
                                       location.error= MoveBirds.df2$Error,#50000, #7920,
                                       time.step=15,
                                       window.size=31, 
                                       margin=11)
          
          writeRaster(dbbmm1, paste("./UD_rastersF/", Bird.lvls[i], ".tif", sep=""), overwrite=TRUE)
          
          Temp.rast = raster(paste("./UD_rastersF/", Bird.lvls[i], ".tif", sep=""))
          
          values(Temp.rast) = Scale(values(Temp.rast))
          
          Temp.rast = Temp.rast*Duration.m
          
          writeRaster(Temp.rast, paste("./UD_rastersF/", Bird.lvls[i], ".tif", sep=""), overwrite=TRUE)
          
      }
    
}

Sum and scale individual bird UDs

UDfiles = list.files(path="./UD_rastersF", 
                     pattern="*.tif", full.names=T, recursive=T)
 
UD.stk = stack(UDfiles)
UD.sum = sum(UD.stk)

UD.sum = crop(UD.sum, extent(UD.r1))
UD.sum = UD.sum + UD.r1
UD.sum = projectRaster(UD.sum, crs=CRS(proj4string(States)))
UD.sum = disaggregate(UD.sum, fact = 5, method="bilinear")
values(UD.sum) = Scale(values(UD.sum))

UD.sum.cont = UD.sum

ValVect = values(UD.sum)
ValVect[ValVect == 0] = NA

Quantiles = quantile(ValVect, probs = c(0.5,0.75,0.99), na.rm=T)

Quant1 = quantile(ValVect, probs = 0.5, na.rm=T)
Quant2 = quantile(ValVect, probs = 0.75, na.rm=T)
Quant3 = quantile(ValVect, probs = 0.99, na.rm=T)

values(UD.sum) = ifelse(values(UD.sum) < Quant1, 1,                                   #1 = background
                    ifelse(values(UD.sum) >= Quant1 & values(UD.sum) < Quant2, 2,     #2 = corridor
                        ifelse(values(UD.sum) >= Quant2 & values(UD.sum) < Quant3, 3, #3 = movement
                            ifelse(values(UD.sum) >= Quant3, 4, NA))))                #4 = stopover


mCols = c("#A50026", "#D73027", "#F46D43", "#FDAE61", "#E0F3F8", "#ABD9E9", "#74ADD1")
mCols = c("yellow2", "orange", "red")

rng = seq(1, 4, 1)

FallLC.test = UD.sum
UD.sumX = UD.sum
UD.sumX[UD.sumX == 1] = NA

levelplot(UD.sumX, 
           margin = FALSE,
           xlab = NULL, 
           ylab = NULL, 
           maxpixels = 1e5,
           col.regions = mCols, at = rng,
           colorkey = list(labels=list(at=c(1.5, 2.5, 3.5),  
                               labels=c("Corridor", "Movement", "Stopover"), cex=1.5),
                               labels=list(cex=12),
                               space = "bottom"), 
                               par.strip.text = list(fontface='bold', cex=1),
           par.settings = list(axis.line = list(col = "black"),
                               strip.background = list(col = 'transparent'), 
                               strip.border = list(col = 'transparent')),
                               scales = list(cex = 1.25))  +
           latticeExtra::layer(sp.polygons(States, col = "darkgray", lwd = 0.5)) 

3.3 Landcover (RSF)

Jacobs' preference index D (Jacobs 1974): D = (r - p)/(r + p - 2rp), where r is the proportion of a habitat in occupied territory and p is the proportion of the same habitat in the study area. The index ranges from -1 (complete avoidance) to +1 (exclusive use); a value of 0 indicates that the habitat was used in proportion to its availability.
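As a worked illustration (values hypothetical), D can be computed directly:

#Jacobs' D: r = proportion used, p = proportion available
jacobsD = function(r, p){ (r - p) / (r + p - 2 * r * p) }

jacobsD(r = 0.40, p = 0.10) # 0.714: strong selection
jacobsD(r = 0.10, p = 0.10) # 0.000: use proportional to availability
jacobsD(r = 0.02, p = 0.10) # -0.690: avoidance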

NLCD = raster("C:/Users/humph173/Documents/Michigan_State/Marten/Marten_SDM/NLCD_2011/landcover_full.tif")


Spring.LC = rasterToPoints(SpringLC.test, sp=T)
Spring.LC = subset(Spring.LC, layer != 1)
names(Spring.LC@data) = "UD"

Fall.LC = rasterToPoints(FallLC.test, sp=T)
Fall.LC = subset(Fall.LC, layer != 1)
names(Fall.LC@data) = "UD"



Fall.LC$LC = extract(NLCD, spTransform(Fall.LC, proj4string(NLCD)), method="simple")
Spring.LC$LC = extract(NLCD, spTransform(Spring.LC, proj4string(NLCD)), method="simple")


Fall.LC.table = Fall.LC@data

Fall.LC.table = as.data.frame(
                     Fall.LC.table %>%
                       group_by(UD, LC) %>%
                       summarise(Count = length(LC)))

FLC2 = Fall.LC.table %>% filter(UD == 2)
FLC3 = Fall.LC.table %>% filter(UD == 3)
FLC4 = Fall.LC.table %>% filter(UD == 4)

FLC2$UD3 = with(FLC3, 
                  Count[match(
                      FLC2$LC,
                        LC)])

FLC2$UD4 = with(FLC4, 
                  Count[match(
                      FLC2$LC,
                        LC)])

FLC2[is.na(FLC2)] = 0

UD2_BackGrnd = sum(FLC2$Count)
UD3_BackGrnd = sum(FLC2$UD3)
UD4_BackGrnd = sum(FLC2$UD4)

TotalBG = UD2_BackGrnd + UD3_BackGrnd + UD4_BackGrnd



FLC2 = FLC2 %>%
       mutate(Total.p = (Count + UD3 + UD4)/TotalBG,
               UD2.p = (Count)/UD2_BackGrnd,
               UD3.p = (UD3)/UD3_BackGrnd,
               UD4.p = (UD4)/UD4_BackGrnd)

#Jacobs' D = (r - p)/(r + p - 2rp), used as the resource selection index
FLC2$D_UD2 = (FLC2$UD2.p - FLC2$Total.p)/(FLC2$UD2.p + FLC2$Total.p - (2*FLC2$UD2.p*FLC2$Total.p))
FLC2$D_UD3 = (FLC2$UD3.p - FLC2$Total.p)/(FLC2$UD3.p + FLC2$Total.p - (2*FLC2$UD3.p*FLC2$Total.p))
FLC2$D_UD4 = (FLC2$UD4.p - FLC2$Total.p)/(FLC2$UD4.p + FLC2$Total.p - (2*FLC2$UD4.p*FLC2$Total.p))


##################################################SPRING
Spring.LC.table = Spring.LC@data

Spring.LC.table = as.data.frame(
                     Spring.LC.table %>%
                       group_by(UD, LC) %>%
                       summarise(Count = length(LC)))

SLC2 = Spring.LC.table %>% filter(UD == 2)
SLC3 = Spring.LC.table %>% filter(UD == 3)
SLC4 = Spring.LC.table %>% filter(UD == 4)

SLC2$UD3 = with(SLC3, 
                  Count[match(
                      SLC2$LC,
                        LC)])

SLC2$UD4 = with(SLC4, 
                  Count[match(
                      SLC2$LC,
                        LC)])

SLC2[is.na(SLC2)] = 0

UD2_BackGrnd = sum(SLC2$Count)
UD3_BackGrnd = sum(SLC2$UD3)
UD4_BackGrnd = sum(SLC2$UD4)

TotalBG = UD2_BackGrnd + UD3_BackGrnd + UD4_BackGrnd



SLC2 = SLC2 %>%
       mutate(Total.p = (Count + UD3 + UD4)/TotalBG,
               UD2.p = (Count)/UD2_BackGrnd,
               UD3.p = (UD3)/UD3_BackGrnd,
               UD4.p = (UD4)/UD4_BackGrnd)

#Jacobs' D = (r - p)/(r + p - 2rp)
SLC2$D_UD2 = (SLC2$UD2.p - SLC2$Total.p)/(SLC2$UD2.p + SLC2$Total.p - (2*SLC2$UD2.p*SLC2$Total.p))
SLC2$D_UD3 = (SLC2$UD3.p - SLC2$Total.p)/(SLC2$UD3.p + SLC2$Total.p - (2*SLC2$UD3.p*SLC2$Total.p))
SLC2$D_UD4 = (SLC2$UD4.p - SLC2$Total.p)/(SLC2$UD4.p + SLC2$Total.p - (2*SLC2$UD4.p*SLC2$Total.p))


#legend key for NLCD
nlcd.leg = read.csv("C:/Users/humph173/Documents/LabWork/Patuxent/Patux_level1/NLCD_leg.csv", 
                        stringsAsFactors=FALSE,
                        header = TRUE, sep=",")

FLC2$Cover = with(nlcd.leg,
                   Type[match(FLC2$LC,
                                      Code)])

SLC2$Cover = with(nlcd.leg,
                   Type[match(SLC2$LC,
                                      Code)])



LC.plt.df = FLC2 %>% 
            mutate(Season = "Fall") %>%
            select(Season, Cover, D_UD4)

LC.plt.df2 = SLC2 %>% 
            mutate(Season = "Spring") %>%
            select(Season, Cover, D_UD4)

cor(LC.plt.df$D_UD4, LC.plt.df2$D_UD4)
## [1] 0.9100678
LC.plt.df = rbind(LC.plt.df2, LC.plt.df)
LC.plt.df = LC.plt.df %>% filter(Cover != "No Data")

LC.plt.df$Season = as.factor(LC.plt.df$Season)
LC.plt.df$Season <- ordered(LC.plt.df$Season, levels = c("Spring", "Fall"))


#Quick check: fall stopover selection (D_UD4) by raw NLCD code
ggplot(FLC2, aes(factor(LC), D_UD4)) +
      geom_bar(stat="identity")

SLabs = paste(FLC2$Cover)

LU.plt = ggplot(LC.plt.df, aes(factor(Cover), D_UD4)) + 
                 geom_bar(stat="identity", fill = "darkgray") +
                 facet_wrap(~Season, ncol=2) +
                  coord_flip() +
                      theme_classic() +
                       xlab(" ") +
                       ylab("Resource Selection Index (Jacob's D)") +
                       scale_y_continuous(breaks = seq(-1,1,0.5), 
                            limits =c(-1,1)) +
                        theme(plot.title = element_text(hjust = 0.5),
                              legend.position = "bottom",
                              legend.title=element_text(size=22), 
                              legend.text=element_text(size=18),
                              #panel.grid.major = element_blank(),
                              #panel.grid.minor = element_blank(),
                              strip.background = element_blank(),
                              #panel.border = element_rect(colour = "black"),
                              title = element_text(face="bold", size=22, hjust=0.5),
                              strip.text = element_text(face="bold", size = 22),
                              axis.title.y = element_text(face="bold", size = 24),
                              axis.text.x = element_text(face="bold", size=16, vjust=0.5, 
                                                         hjust=0.5, angle=0),
                              axis.text.y = element_text(face="bold", size=22),
                              axis.title.x = element_text(face="bold", size = 22))

LU.plt

3.4 Poultry Exposure

leg_centers = SpatialPointsDataFrame(gCentroid(Counties, byid=TRUE), 
                                                        Counties@data, match.ID=FALSE)

Counties$Cent.Lon = leg_centers@coords[,1]
Counties$Cent.Lat = leg_centers@coords[,2]

Fall.poultry = as.data.frame(
                        over(Fall.LC, Counties)[,c("Broilers", "Ducks","Layers", "Pullets", "Roosters", "Turkey", "Cent.Lon", "Cent.Lat", "atlas_stco")])

Fall.poultry = cbind(Fall.poultry, Fall.LC@data)


Spring.poultry = as.data.frame(
                        over(Spring.LC, Counties)[,c("Broilers", "Ducks","Layers", "Pullets", "Roosters", "Turkey", "Cent.Lon", "Cent.Lat", "atlas_stco")])

Spring.poultry = cbind(Spring.poultry, Spring.LC@data)


Fall.poultry$Dups = duplicated(Fall.poultry[,c("Cent.Lon", "Cent.Lat", "atlas_stco")])
Fall.poultry = Fall.poultry %>% filter(Dups == FALSE,
                                       is.na(Cent.Lat) == FALSE,
                                       is.na(Cent.Lon) == FALSE)

Fall.poultry[is.na(Fall.poultry)] = 0


Spring.poultry$Dups = duplicated(Spring.poultry[,c("Cent.Lon", "Cent.Lat", "atlas_stco")])
Spring.poultry = Spring.poultry %>% filter(Dups == FALSE,
                                       is.na(Cent.Lat) == FALSE,
                                       is.na(Cent.Lon) == FALSE)

Spring.poultry[is.na(Spring.poultry)] = 0

    
###
Fall.mlt = Fall.poultry %>% select(UD, Broilers, Ducks, Layers, Pullets, Turkey)
Fall.mlt = melt(Fall.mlt, "UD") 
Fall.mlt$value = as.numeric(Fall.mlt$value)

Fall.mlt = as.data.frame(
            Fall.mlt %>%
            group_by(UD, variable) %>%
            summarise(Count = sum(value)))

Fall.mlt$Season = "Fall"

#Spring
Spring.mlt = Spring.poultry %>% select(UD, Broilers, Ducks, Layers, Pullets, Turkey)
Spring.mlt = melt(Spring.mlt, "UD") 
Spring.mlt$value = as.numeric(Spring.mlt$value)

Spring.mlt = as.data.frame(
                Spring.mlt %>%
                group_by(UD, variable) %>%
                summarise(Count = sum(value)))

Spring.mlt$Season = "Spring"

Polutry.set = rbind(Spring.mlt, Fall.mlt)



#Rescale head counts per poultry class so all classes share a plottable range
Polutry.set$s.value = ifelse(Polutry.set$variable == "Broilers", Polutry.set$Count/100000000,
                          ifelse(Polutry.set$variable == "Ducks", Polutry.set$Count/100000,
                              ifelse(Polutry.set$variable == "Layers", Polutry.set$Count/10000000,
                                  ifelse(Polutry.set$variable == "Pullets", Polutry.set$Count/10000000,
                                      ifelse(Polutry.set$variable == "Turkey", Polutry.set$Count/10000000,
                                             Polutry.set$Count)))))


df2 = ddply(Polutry.set, .(UD, variable), transform, percent = s.value/sum(s.value) * 100)

# Format the labels and calculate their positions
df2 = ddply(df2, .(UD, variable), transform, pos = (cumsum(s.value) - 0.5 * s.value))
df2$label = paste0(sprintf("%.0f", df2$percent), "%")

df2$variable = ordered(df2$variable, levels = c("Broilers", "Layers", "Pullets", "Turkey", "Ducks"))

# Plot
ggplot(df2, aes(x = factor(UD), y = s.value, fill = Season)) +
        geom_bar(position = position_stack(), 
                 stat = "identity", width = .7) +
         scale_fill_manual(values = c("lightgray","darkgray")) +
        geom_text(aes(label = label), 
                  position = position_stack(vjust = 5), size = 4) +
        facet_grid(~variable, scales="fixed") +
        theme_classic() +
        #coord_flip() +
           xlab(" ") +
           ylab("Poultry (head)") +
           scale_x_discrete(labels = c("Corridor", "Movement", "Stopover")) +
           #     limits =c(-1,1)) +
            theme(plot.title = element_text(hjust = 0.5),
                  legend.position = c(0.5, 0.7),
                  legend.title=element_text(size=24), 
                  legend.text=element_text(size=18),
                  #panel.grid.major = element_blank(),
                  #panel.grid.minor = element_blank(),
                  strip.background = element_blank(),
                  #panel.border = element_rect(colour = "black"),
                  title = element_text(face="bold", size=22, hjust=0.5),
                  strip.text = element_text(face="bold", size = 24),
                  axis.title.y = element_text(face="bold", size = 24),
                  axis.text.x = element_text(face="bold", size=16, vjust=1, 
                                             hjust=1, angle=60),
                  axis.text.y = element_text(face="bold", size=22),
                  axis.title.x = element_text(face="bold", size = 22))

4 Movement Models

Discrete movement models

4.1 Preprocessing

Apply a geographic projection and remove duplicate timestamps.

Sp.pnts = SpatialPointsDataFrame(Locs, Argos_sp)
proj4string(Sp.pnts) = proj4string(States)

Sp.pnts$US = is.na(over(Sp.pnts, States))[,1]

nProj = "+proj=merc +a=6378137 +b=6378137 +lat_ts=0 +lon_0=0
         +x_0=0 +y_0=0 +k=1 +units=km +nadgrids=@null +no_defs"

proj.pnts = spTransform(Sp.pnts, nProj)

proj.pnts$Dups = duplicated(proj.pnts@data[,c("Animal_ID", "LocT")]) #Bird-specific timestamp duplicates

proj.pnts@data %>%
  group_by(Dups) %>%
  summarise(Count = length(Dups))
## # A tibble: 2 x 2
##   Dups  Count
##   <lgl> <int>
## 1 FALSE 33533
## 2 TRUE     50
proj.pnts = subset(proj.pnts, Dups == FALSE)

proj.pnts$DOY = yday(proj.pnts$LocT) #Day of Year

proj.pnts$pLong = proj.pnts@coords[,1]
proj.pnts$pLat = proj.pnts@coords[,2]
proj.pnts$ID = proj.pnts$Animal_ID

Calculate distance to water (km) and assign a unique integer identifier to each bird.

GLCcdm = raster("C:/Users/humph173/Documents/LabWork/Patuxent/Patux_level1/Arc/LC/WetDist/GLCcdm.tif") 

proj.pnts$wDs = as.numeric(
                     raster::extract(GLCcdm, 
                     spTransform(proj.pnts, 
                     CRS(proj4string(GLCcdm))), 
                     method = "simple"))

range(proj.pnts$wDs, na.rm=T)
## [1]   0.0000 354.2871
#Bird ID
proj.pnts$ID_Int = as.integer(factor(proj.pnts$Animal_ID))

Format data for modeling as required by the moveHMM package; prepData() calculates step length and turning angle.

data = prepData(proj.pnts@data, type="UTM", coordNames=c("pLong","pLat"))

head(data)
##               ID     step       angle         x        y Latitude
## 1 BWTE13S_131009 2.715642          NA -11925.32 6810.438   52.057
## 2 BWTE13S_131009 1.679576  0.23183077 -11922.76 6809.533   52.052
## 3 BWTE13S_131009 2.916863  3.12513754 -11921.09 6809.352   52.051
## 4 BWTE13S_131009 3.912969 -2.92448847 -11923.99 6809.714   52.053
## 5 BWTE13S_131009 2.465601 -2.69449330 -11920.09 6810.076   52.055
## 6 BWTE13S_131009 4.462573  0.06402966 -11922.21 6808.809   52.048
##   Longitude DAF_Filter         Data_DOI      Species      Animal_ID  Fate
## 1  -107.127          0 10.5056/AABBCCDD Anas discors BWTE13S_131009 alive
## 2  -107.104          0 10.5056/AABBCCDD Anas discors BWTE13S_131009 alive
## 3  -107.089          0 10.5056/AABBCCDD Anas discors BWTE13S_131009 alive
## 4  -107.115          0 10.5056/AABBCCDD Anas discors BWTE13S_131009 alive
## 5  -107.080          0 10.5056/AABBCCDD Anas discors BWTE13S_131009 alive
## 6  -107.099          0 10.5056/AABBCCDD Anas discors BWTE13S_131009 alive
##   Program    PTT Satellite Location_Datetime_GMT Location_Class
## 1    8387 131009        MA   2013/08/13 18:12:50              2
## 2    8387 131009        NL   2013/08/13 18:44:28              3
## 3    8387 131009        NP   2013/08/13 19:03:48              3
## 4    8387 131009        NN   2013/08/13 20:00:48              1
## 5    8387 131009        NP   2013/08/13 20:46:25              2
## 6    8387 131009        NN   2013/08/13 21:40:43              2
##   Lat_Solution_1 Lon_Solution_1 Lat_Solution_2 Lon_Solution_2 N_Messages
## 1         52.057       -107.127         52.057       -107.127         11
## 2         52.052       -107.104         52.052       -107.104          4
## 3         52.051       -107.089         52.051       -107.089          4
## 4         52.053       -107.115         52.053       -107.115          4
## 5         52.055       -107.080         52.055       -107.080          7
## 6         52.048       -107.099         52.048       -107.099          8
##   Nmess_GT_neg120dB Best_Level_dB Pass_Duration NOPC Location_Index
## 1                 0          -120           726    3             40
## 2                 0          -130           300    3             50
## 3                 0          -132           291    3             60
## 4                 0          -134           247    3             58
## 5                 0          -125           376    3             58
## 6                 0          -124           409    3             58
##   Frequency Altitude Error_Radius Semimajor_Axis Semiminor_Axis
## 1 401672924    0.472          492           1140            212
## 2 401672917    0.473          213            426            106
## 3 401672920    0.482          192            513             71
## 4 401672915    0.472          894           2238            356
## 5 401672919    0.483          348            790            153
## 6 401672919    0.481          311           2983             32
##   Ellipse_Orientation GDOP Message_Datetime_GMT Compression_Index sen1
## 1                 101   92  2013/08/13 18:22:14                 1    1
## 2                 153  414  2013/08/13 18:46:58                 1    2
## 3                  93  320  2013/08/13 19:04:47                 1  170
## 4                 138  839  2013/08/13 20:03:34                 2    3
## 5                  74  196  2013/08/13 20:48:31                 2    4
## 6                  79  440  2013/08/13 21:45:06                 2    5
##   sen2 sen3 sen4             ObsDate   ObsDateD                LocT LocHR
## 1    1    0    1 2013-08-13 18:12:50 2013-08-13 2013-08-13 13:12:50    13
## 2    1    3    1 2013-08-13 18:44:28 2013-08-13 2013-08-13 13:44:28    13
## 3  200    2   35 2013-08-13 19:03:48 2013-08-13 2013-08-13 14:03:48    14
## 4    1    0    1 2013-08-13 20:00:48 2013-08-13 2013-08-13 15:00:48    15
## 5    1    0    1 2013-08-13 20:46:25 2013-08-13 2013-08-13 15:46:25    15
## 6    1    0    1 2013-08-13 21:40:43 2013-08-13 2013-08-13 16:40:43    16
##   LocDY LocWK LocMN LocYR   Season   US  Dups DOY       wDs ID_Int
## 1    13    33     8  2013 Breeding TRUE FALSE 225 0.9026195      1
## 2    13    33     8  2013 Breeding TRUE FALSE 225 0.0000000      1
## 3    13    33     8  2013 Breeding TRUE FALSE 225 1.3539293      1
## 4    13    33     8  2013 Breeding TRUE FALSE 225 0.0000000      1
## 5    13    33     8  2013 Breeding TRUE FALSE 225 2.0183187      1
## 6    13    33     8  2013 Breeding TRUE FALSE 225 0.6382484      1
data$step[is.na(data$step)] = mean(data$step, na.rm=T) #impute missing values created at track endpoints
data$angle[is.na(data$angle)] = mean(data$angle, na.rm=T)
data$wDs[is.na(data$wDs)] = mean(data$wDs, na.rm=T)
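Turning angles are circular, so the arithmetic mean above is only a rough stopgap; a circular-mean imputation (a sketch, as an alternative to the angle line above) would be:

#Circular mean: average the unit vectors, then recover the angle with atan2()
circ.mean = atan2(mean(sin(data$angle), na.rm=T),
                  mean(cos(data$angle), na.rm=T))
#data$angle[is.na(data$angle)] = circ.mean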

4.2 Step and Angle Statistics

Descriptive stats to inform model calibration. Note that the greatest step distances are associated with the spring and fall migrations, and that mean turning angle is greatest during the overwintering periods, indicating an increase in turning. Also note that negative angles are correlated with the breeding season and with the periods before spring and after fall migration, perhaps reflecting increased foraging/feeding behavior.
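These seasonal patterns can be checked directly with a per-season summary (a sketch using the Season label assigned during preprocessing):

as.data.frame(
      data %>%
      group_by(Season) %>%
      summarise(MedStep = median(step, na.rm=T),   #km
                MeanAngle = mean(angle, na.rm=T))) #radians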

mean(data$Error_Radius)/1000 #Mean Error Radius from Data (m to km)
## [1] 1.21875
mean(data$step, na.rm=T)  #Mean step distance (km)
## [1] 7.268012
mean(data$angle, na.rm=T)  #Mean turning angle (radians)
## [1] 0.0514526
StepStats = as.data.frame(
                  data %>%
                  group_by(LocWK) %>%
                  summarise(MeanS = mean(step, na.rm=T),
                            MeanA = mean(angle, na.rm=T),
                            MeanW = mean(wDs, na.rm=T)))

ggplot(StepStats, aes(LocWK, MeanS)) +
       geom_bar(stat="identity", fill = "darkgray") +
       theme_classic() +
             xlab("Week of Year ") +
             ylab("Mean Step Length (km)") +
             xlim(1, 52) +
              theme(plot.title = element_text(hjust = 0.5),
                    legend.position = "bottom",
                    legend.title=element_text(size=22), 
                    legend.text=element_text(size=18),
                    strip.background = element_blank(),
                    title = element_text(face="bold", size=22, hjust=0.5),
                    strip.text = element_text(face="bold", size = 22),
                    axis.title.y = element_text(face="bold", size = 24),
                    axis.text.x = element_text(face="bold", size=16, vjust=0.5, 
                                               hjust=0.5, angle=0),
                    axis.text.y = element_text(face="bold", size=20),
                    axis.title.x = element_text(face="bold", size = 22))

ggplot(StepStats, aes(LocWK, MeanA)) +
       geom_bar(stat="identity", fill = "darkgray") +
       theme_classic() +
             xlab("Week of Year ") +
             ylab("Mean Turning Angle (radians)") +
             xlim(1, 52) +
              theme(plot.title = element_text(hjust = 0.5),
                    legend.position = "bottom",
                    legend.title=element_text(size=22), 
                    legend.text=element_text(size=18),
                    strip.background = element_blank(),
                    title = element_text(face="bold", size=22, hjust=0.5),
                    strip.text = element_text(face="bold", size = 22),
                    axis.title.y = element_text(face="bold", size = 24),
                    axis.text.x = element_text(face="bold", size=16, vjust=0.5, 
                                               hjust=0.5, angle=0),
                    axis.text.y = element_text(face="bold", size=20),
                    axis.title.x = element_text(face="bold", size = 22))

4.3 Initial Movement States

Estimate initial movement states using k-means clustering before constructing movement models.

Find the optimal number of clusters (k).

Kmeans.df = as.data.frame(data %>% select(step, angle))

wss.func = function(data, nc=10, seed=1234){
                     #k = 1: within-group SS equals the total variance
                     wss = (nrow(data)-1)*sum(apply(data,2,var))
                     
                     for (i in 2:nc){ #k = 2 through nc
                          set.seed(seed)
                          wss[i] = sum(kmeans(data, centers=i)$withinss)}
                     
                     wss.df = as.data.frame(cbind(1:nc, wss))
                     names(wss.df) = c("Clusters","SS")
                     
                     return(wss.df)}


BClust = wss.func(Kmeans.df)

ggplot(BClust, aes(Clusters, SS/10^6)) +
       geom_vline(xintercept = 3,
                   linetype = "solid",
                   col = "red",
                   size = 0.5) +
       geom_point(shape=1, size=2) +
       geom_line() +
       scale_x_continuous(breaks = seq(0, 10, 1), 
                                     limits =c(0,10)) +
       theme_classic() +
             ylim(0,150) +
             xlab("Number of Clusters (k)") +
             ylab("Within group Sum of Squares") +
             theme(plot.title = element_text(hjust = 0.5),
                    legend.position = "bottom",
                    legend.title=element_text(size=22), 
                    legend.text=element_text(size=18),
                    strip.background = element_blank(),
                    title = element_text(face="bold", size=22, hjust=0.5),
                    strip.text = element_text(face="bold", size = 22),
                    axis.title.y = element_text(face="bold", size = 24),
                    axis.text.x = element_text(face="bold", size=16, vjust=0.5, 
                                               hjust=0.5, angle=0),
                    axis.text.y = element_text(face="bold", size=20),
                    axis.title.x = element_text(face="bold", size = 22))

Assign locations to their initial cluster designations for later comparison.

Clust_k2 = kmeans(Kmeans.df, 2)
data$K2 = Clust_k2$cluster

Clust_k3 = kmeans(Kmeans.df, 3)
data$K3 = Clust_k3$cluster


Kpnts = as.data.frame(data) %>%
        select(Longitude, Latitude, K2, K3)

4.4 Hidden Markov Models

Using the moveHMM package.

Model 1 (m1) parameters are set to distinguish between two movement states: frequent, local movements versus less frequent movements over moderate leg distances (starting step means of 2 km and 10 km, respectively).

#Step parameters
mu0 = c(2, 10) # one value for each state (units are km)
sigma0 = c(2, 10) 
zeromass0 = c(0.01, 0.01) # step zero-mass for zeros in data
stepPar0 = c(mu0, sigma0, zeromass0) #combine

#Angle parameters
angleMean0 = c(0.2, 0.1) # angles mean
kappa0 = c(1, 0.5) # angle concentration
anglePar0 = c(angleMean0, kappa0) #combine

#Model 1
m1 = fitHMM(data=data,
            nbStates=2,
            stepPar0=stepPar0,
            anglePar0=anglePar0,
            formula= ~1)

Record Predicted Movement States from m1

data$m1k2 = viterbi(m1)

cor(data$m1k2, data$K2)
## [1] -0.2421654
chk.frm = as.data.frame(cbind(data$m1k2, data$K2))
names(chk.frm) = c("HMM", "KMEANS")
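Because k-means labels are assigned arbitrarily, the sign of this correlation is not meaningful; a contingency table (a sketch) shows the state agreement directly:

#Cross-tabulate HMM states against k-means clusters; swapped labels
#show up as large off-diagonal counts
table(HMM = chk.frm$HMM, KMEANS = chk.frm$KMEANS)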

Model 2 (m2) parameters are set to distinguish between three movement states: frequent local movements, less frequent movements over moderate distances, and still less frequent long-range movements (starting step means of 2, 10, and 20 km, respectively).

#Step parameters
mu0 = c(2, 10, 20) # one value for each state (units are km)
sigma0 = c(2, 10, 20) 
zeromass0 = c(0.01, 0.01, 0.01) # step zero-mass
stepPar0 = c(mu0, sigma0, zeromass0) #combine

#Angle parameters
angleMean0 = c(0.2, 0.1, 0.1) # angles mean
kappa0 = c(1, 0.5, 0.5) # angle concentration
anglePar0 = c(angleMean0, kappa0) #combine

#Model 2
m2 = fitHMM(data=data,
            nbStates=3,
            stepPar0=stepPar0,
            anglePar0=anglePar0,
            formula=~1)

Record Predicted Movement States from m2

data$m1k3 = viterbi(m2)


cor(data$m1k3, data$K3)
## [1] -0.2165064
chk.frm = as.data.frame(cbind(data$m1k3, data$K3))
names(chk.frm) = c("HMM", "KMEANS")

Compare the Two- and Three-State HMMs
The lowest AIC score is best; the three-state model is most supported.

AIC(m1, m2)
##   Model      AIC
## 1    m2 231196.2
## 2    m1 239089.1
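moveHMM also provides built-in diagnostics for fitted models; for example (a sketch):

plot(m2, ask = FALSE) #fitted step/angle densities and decoded tracks
plotStates(m2)        #Viterbi-decoded state sequence per animal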

Record the posterior state probabilities as new columns for later comparison.

sp = as.data.frame(stateProbs(m1))
names(sp) = c("M1_s1","M1_s2")

data = cbind(data, sp)


sp = as.data.frame(stateProbs(m2))
names(sp) = c("M2_s1","M2_s2", "M2_s3")

data = cbind(data, sp)

4.5 Spatio-temporal Movement Model

4.5.1 Data Preparation

Calculate bird-specific time between locations; this is used to account for temporal correlation.

data.df = as.data.frame(data)

proj.pnts = SpatialPointsDataFrame(data.df[,c("Longitude","Latitude")], data.df)
proj4string(proj.pnts) = proj4string(States)


ID.levels = levels(factor(proj.pnts$Animal_ID))

total.birds = length(ID.levels)

for(i in 1:total.birds){

   tmp.bird = subset(proj.pnts, Animal_ID == ID.levels[i])
   
   tmp.bird$LocT_Lag1 = lag(tmp.bird$LocT)
   
   tmp.bird$Time.dif = as.numeric(tmp.bird$LocT - tmp.bird$LocT_Lag1)
   
   tmp.bird$Time.dif[is.na(tmp.bird$Time.dif)] = mean(tmp.bird$Time.dif, na.rm=T)
   
   tmp.bird$Time.dif = tmp.bird$Time.dif/sd(tmp.bird$Time.dif) #Scale
   
   tmp.bird$BT_step = as.integer(1:length(tmp.bird$Time.dif))

            if(i == 1){proj.pnts.df = tmp.bird@data}
                else{proj.pnts.df = rbind(proj.pnts.df, tmp.bird@data)}
   
   
}
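The per-bird loop above can also be written with grouped dplyr verbs; a sketch that should reproduce Time.dif and BT_step (row order follows the input data rather than the loop's rbind order):

proj.pnts.df = proj.pnts@data %>%
               group_by(Animal_ID) %>%
               mutate(Time.dif = as.numeric(LocT - lag(LocT)),
                      Time.dif = ifelse(is.na(Time.dif),
                                        mean(Time.dif, na.rm=T), Time.dif),
                      Time.dif = Time.dif/sd(Time.dif), #Scale
                      BT_step = row_number()) %>%       #within-bird time step
               as.data.frame()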

proj.pnts = SpatialPointsDataFrame(proj.pnts.df[,c("Longitude", "Latitude")], proj.pnts.df)
proj4string(proj.pnts) = proj4string(States)

Mesh Construction. Construct a curved, triangulated mesh scaled to Earth's radius to model spatial effects.

Zext = gBuffer(gConvexHull(proj.pnts), width = 2)

NA.bnds = inla.sp2segment(Zext)

MaxEdge = 1

mesh0 = inla.mesh.2d(boundary = NA.bnds, 
                     cutoff = 1, 
                     max.edge = MaxEdge,
                     min.angle = 21) 

MeshLocs = cbind(mesh0$loc[,1], mesh0$loc[,2]) 

xyz = as.data.frame(                
          inla.mesh.map(MeshLocs, 
                        projection = "longlat", 
                        inverse = TRUE))

true.radius.of.earth = 6371 #km
radius.of.earth = 1

mesh.dom = inla.mesh.create(loc = xyz,
                     cutoff = 40/true.radius.of.earth, 
                     refine=list(max.edge = 1000/true.radius.of.earth, 
                                 min.angle = 26))

Click, drag, and zoom to view the mesh.

mesh.dom$n 
## [1] 2769
plot(mesh.dom, rgl = TRUE, main = " ")


Connect bird locations to the mesh through a projector (A) matrix.

locs = cbind(proj.pnts@coords[,1], proj.pnts@coords[,2])

locs = inla.mesh.map(locs, 
                     projection = "longlat", 
                     inverse = TRUE)

A.m1 = inla.spde.make.A(mesh.dom, 
                          alpha = 2,
                          loc=locs)

A.m3 = A.m2 = A.m1 #Extra copies, one for each movement state
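A quick dimension check (a sketch) confirms the projector matrix relates every location to the mesh nodes:

dim(A.m1) #rows = number of bird locations, columns = mesh nodes
stopifnot(dim(A.m1)[2] == mesh.dom$n)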

Additional Covariates. Scale and center candidate covariates.

FE.df = proj.pnts@data

FE.df$S_Time.dif = scale(FE.df$Time.dif, scale = T, center = T)

FE.df$S_Step = scale(FE.df$step, scale = T, center = T)
FE.df$S_Step[is.na(FE.df$S_Step)] = mean(FE.df$S_Step, na.rm=T)

FE.df$S_wDs = as.numeric(scale(FE.df$wDs, scale = T, center = T))

FE.df$S_Angle = scale(FE.df$angle, scale = T, center = T)
FE.df$S_Angle[is.na(FE.df$S_Angle)] = mean(FE.df$S_Angle, na.rm=T)

FE.df$S_Lat = scale(FE.df$Latitude, scale = T, center = T)

Calculate solar hour.

FE.df$LocSec = second(FE.df$LocT)
FE.df$LocMin = minute(FE.df$LocT) #minute of the hour (LocMN holds the month)

FE.df$JDay = JDymd(FE.df$LocYR,
                   FE.df$LocMN,
                   FE.df$LocDY,
                   FE.df$LocHR,
                   FE.df$LocMin,
                   FE.df$LocSec)

FE.df$LoD = daylength(FE.df$Latitude, FE.df$Longitude, FE.df$JDay, -20)

FE.df$HrAngle = hourangle(FE.df$JDay, FE.df$Longitude, -20)

FE.df$SunVEct = sunvector(FE.df$JDay, FE.df$Latitude, FE.df$Longitude, -20)

FE.df$LST = local2Solar(FE.df$LocT, lon = FE.df$Longitude)
FE.df$LSTctz = ymd_hms(format(FE.df$LocT, tz="America/Chicago", usetz=TRUE))
FE.df$LSThr = hour(FE.df$LSTctz)

Define spatial priors and indices (assigning a unique ID to each mesh node).

spde = inla.spde2.pcmatern(mesh.dom, 
                           alpha = 2,
                           prior.range=c(1, 0.5),
                           prior.sigma=c(1, 0.5),
                           constr = TRUE)
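Because the mesh sits on a unit-radius sphere, the range parameter is in Earth-radius units; the prior above states P(range < 1) = 0.5, i.e., a prior median of roughly one Earth radius. A small helper (a sketch) converts ranges back to kilometers:

#Convert a spatial range from unit-sphere units to kilometers
range.km = function(r) r * true.radius.of.earth
range.km(1) #prior median range: ~6371 km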

field1 = inla.spde.make.index("field1", spde$n.spde)
field2 = inla.spde.make.index("field2", spde$n.spde)
field3 = inla.spde.make.index("field3", spde$n.spde)
field1x = inla.spde.make.index("field1x", spde$n.spde)
field1xx = inla.spde.make.index("field1xx", spde$n.spde)
field2x = inla.spde.make.index("field2x", spde$n.spde)

Organize data for the two-state movement model.

#Movement State 1
FE.lst.1 = list(c(field1,
                list(intercept1 = 1)),
                list(Time.dif = round(FE.df[,"S_Time.dif"],3),
                     STime.dif = round(Scale(FE.df[,"S_Time.dif"]),3),
                     Vals = 1:dim(FE.df)[1],
                     S_wDs = FE.df[,"S_wDs"],
                     S_Step = FE.df[,"S_Step"],
                     S_Angle = FE.df[,"S_Angle"],
                     Angle = FE.df[,"angle"],
                     Step = FE.df[,"step"],
                     Lat = FE.df[,"S_Lat"],
                     DOY = FE.df[,"DOY"],
                     WOY = FE.df[,"LocWK"],
                     LocHR = FE.df[,"LocHR"],
                     SolHr = FE.df[,"LSThr"],
                     ID_Int = FE.df[,"ID_Int"],
                     ID_Int2 = FE.df[,"ID_Int"],
                     BT_step = FE.df[,"BT_step"]))

FE.df$MState1 = ifelse(FE.df$m1k2 == 1, 1, 0) #Probability of State 1


K2.stk = inla.stack(data = list(State = FE.df$MState1),
                                       A = list(A.m1, 1), 
                                 effects = FE.lst.1,   
                                     tag = "k2.1")

Organize data for the three-state movement model.

#Movement State 1
FE.lst.1 = list(c(field1,
                list(intercept1 = 1)),
                list(Time.dif = round(FE.df[,"S_Time.dif"],3),
                     STime.dif = round(Scale(FE.df[,"S_Time.dif"]),3),
                     Vals = 1:dim(FE.df)[1],
                     S_wDs1 = FE.df[,"S_wDs"],
                     S_Step1 = FE.df[,"S_Step"],
                     S_Angle1 = FE.df[,"S_Angle"],
                     Lat = FE.df[,"S_Lat"],
                     DOY = FE.df[,"DOY"],
                     WOY1 = FE.df[,"LocWK"],
                     LocHR = FE.df[,"LocHR"],
                     SolHr1 = FE.df[,"LSThr"],
                     ID_Int = FE.df[,"ID_Int"],
                     ID_Int1 = FE.df[,"ID_Int"],
                     BT_step1 = FE.df[,"BT_step"]))

FE.df$MState1 = ifelse(FE.df$m1k3 == 1, 1, 0) #State 1

State1.stkC = inla.stack(data = list(State = FE.df$MState1),
                                       A = list(A.m1, 1), 
                                 effects = FE.lst.1,   
                                     tag = "m.1")

State1.stk = inla.stack(data = list(State = cbind(FE.df$MState1, NA, NA)),
                                       A = list(A.m1, 1), 
                                 effects = FE.lst.1,   
                                     tag = "m.1")


#Movement State 2
FE.lst.2 = list(c(field2,
                  field1x,
                list(intercept2 = 1)),
                list(Time.dif = round(FE.df[,"S_Time.dif"],3),
                     STime.dif = round(Scale(FE.df[,"S_Time.dif"]),3),
                     S_wDs2 = FE.df[,"S_wDs"],
                     S_Step2 = FE.df[,"S_Step"],
                     S_Angle2 = FE.df[,"S_Angle"],
                     Lat = FE.df[,"S_Lat"],
                     DOY = FE.df[,"DOY"],
                     WOY2 = FE.df[,"LocWK"],
                     LocHR = FE.df[,"LocHR"],
                     SolHr2 = FE.df[,"LSThr"],
                     ID_Int = FE.df[,"ID_Int"],
                     ID_Int2 = FE.df[,"ID_Int"],
                     BT_step2 = FE.df[,"BT_step"]))

FE.df$MState2 = ifelse(FE.df$m1k3 == 2, 1, 0)

State2.stk = inla.stack(data = list(State = cbind(NA, FE.df$MState2, NA)),
                                       A = list(A.m2, 1), 
                                 effects = FE.lst.2,   
                                     tag = "m.2")


#Movement State 3
FE.lst.3 = list(c(field3,
                  field1xx,
                  field2x,
                list(intercept3 = 1)),
                list(Time.dif = round(FE.df[,"S_Time.dif"],3),
                     STime.dif = round(Scale(FE.df[,"S_Time.dif"]),3),
                     S_wDs3 = FE.df[,"S_wDs"],
                     S_Step3 = FE.df[,"S_Step"],
                     S_Angle3 = FE.df[,"S_Angle"],
                     Lat = FE.df[,"S_Lat"],
                     DOY = FE.df[,"DOY"],
                     WOY3 = FE.df[,"LocWK"],
                     LocHR = FE.df[,"LocHR"],
                     SolHr3 = FE.df[,"LSThr"],
                     ID_Int = FE.df[,"ID_Int"],
                     ID_Int3 = FE.df[,"ID_Int"],
                     BT_step3 = FE.df[,"BT_step"]))


FE.df$MState3 = ifelse(FE.df$m1k3 == 3, 1, 0)

State3.stk = inla.stack(data = list(State = cbind(NA, NA, FE.df$MState3)),
                                       A = list(A.m3, 1), 
                                 effects = FE.lst.3,   
                                     tag = "m.3")

Joint.stk = inla.stack(State1.stk, State2.stk, State3.stk)
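Before fitting, a structural check (a sketch) on the joint stack: the stacked response should be a three-column matrix with exactly one non-NA entry per row.

resp = inla.stack.data(Joint.stk)$State
dim(resp)                    #rows = 3 x locations; 3 columns (one per state)
table(rowSums(!is.na(resp))) #expect every row to equal 1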

#save(list=c("Joint.stk", "spde"), file="./HPC_042319/SM_0427B.RData") #bundled for HPCC

4.5.2 Non-Spatio-temporal Comparison

Execute the two-state Bayesian hierarchical model for comparison to the HMM. This base model is used to confirm that we can reasonably capture the HMM results using the step length and turning angle movement covariates.

Non-Spatio-temporal Two-State Model

LCPrior = list(theta=list(prior = "normal", param=c(0, 5)))

Frm1 = State ~ -1 +  intercept1 + S_Step + S_Angle 

#theta.m1  = Model1$internal.summary.hyperpar$mean
theta.m1 = c(-3.7033686, 1.1410830, 0.4698117, 0.2795529, 0.8987578, 0.6768913, 0.3915883)
              
              
Model.k2 = inla(Frm1, 
             data = inla.stack.data(K2.stk, spde=spde), 
             family = "binomial",
             verbose = TRUE,
             control.fixed = list(prec = 0.001, 
                                  prec.intercept = 0.0001), 
             control.predictor = list(
                                    A = inla.stack.A(K2.stk), 
                                    compute = TRUE, 
                                    link = 1), 
             control.inla = list(strategy="gaussian", 
                                 int.strategy = "eb"),
             #control.mode = list(restart = TRUE, theta = theta.m1),
             control.results = list(return.marginals.random = TRUE,
                                    return.marginals.predictor = TRUE),
             control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE))
summary(Model.k2)
## 
## Call:
## c("inla(formula = Frm1, family = \"binomial\", data = inla.stack.data(K2.stk, ",  "    spde = spde), verbose = TRUE, control.compute = list(dic = TRUE, ",  "    cpo = TRUE, waic = TRUE), control.predictor = list(A = inla.stack.A(K2.stk), ",  "    compute = TRUE, link = 1), control.inla = list(strategy = \"gaussian\", ",  "    int.strategy = \"eb\"), control.results = list(return.marginals.random = TRUE, ",  "    return.marginals.predictor = TRUE), control.fixed = list(prec = 0.001, ",  "    prec.intercept = 1e-04))")
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          1.3469         26.9246          1.9816         30.2531 
## 
## Fixed effects:
##                mean     sd 0.025quant 0.5quant 0.975quant     mode kld
## intercept1   1.5720 0.0386     1.4962   1.5720     1.6478   1.5720   0
## S_Step     -22.7452 0.4739   -23.6755 -22.7452   -21.8155 -22.7452   0
## S_Angle      0.0000 0.0292    -0.0573   0.0000     0.0574   0.0000   0
## 
## The model has no random effects
## 
## The model has no hyperparameters
## 
## Expected number of effective parameters(std dev): 3.01(0.00)
## Number of equivalent replicates : 11140.59 
## 
## Deviance Information Criterion (DIC) ...............: 9121.49
## Deviance Information Criterion (DIC, saturated) ....: 9121.48
## Effective number of parameters .....................: 3.011
## 
## Watanabe-Akaike information criterion (WAIC) ...: 9120.67
## Effective number of parameters .................: 2.186
## 
## Marginal log-Likelihood:  -4576.20 
## CPO and PIT are computed
## 
## Posterior marginals for linear predictor and fitted values computed

Compare the two-state models (highly correlated).

idat = inla.stack.index(K2.stk, "k2.1")$data
FE.df$K2.st = Model.k2$summary.fitted.values$mean[idat]

cor(FE.df$K2.st, FE.df$M1_s1)
## [1] 0.7320376

Three-State Model (non-spatio-temporal)

Frm1 = State ~ -1 +  intercept1 + 
                     intercept2 +
                     intercept3 +
                     S_Step1 + S_Step2 + S_Step3 + S_Angle1 + S_Angle2 + S_Angle3

#theta.m1  = Model.k3$internal.summary.hyperpar$mean
theta.m1 = c(-3.7033686, 1.1410830, 0.4698117, 0.2795529, 0.8987578, 0.6768913, 0.3915883)
              
              
Model.k3 = inla(Frm1, 
             data = inla.stack.data(Joint.stk, spde=spde), 
             family = c("binomial","binomial","binomial"),
             verbose = TRUE,
             control.fixed = list(prec = 0.001, 
                                  prec.intercept = 0.0001), 
             control.predictor = list(
                                    A = inla.stack.A(Joint.stk), 
                                    compute = TRUE, 
                                    link = 1), 
             control.inla = list(strategy="gaussian", 
                                 int.strategy = "eb"),
             #control.mode = list(restart = TRUE, theta = theta.m1),
             control.results = list(return.marginals.random = TRUE,
                                    return.marginals.predictor = TRUE),
             control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE))
summary(Model.k3)
## 
## Call:
## c("inla(formula = Frm1, family = c(\"binomial\", \"binomial\", \"binomial\"), ",  "    data = inla.stack.data(Joint.stk, spde = spde), verbose = TRUE, ",  "    control.compute = list(dic = TRUE, cpo = TRUE, waic = TRUE), ",  "    control.predictor = list(A = inla.stack.A(Joint.stk), compute = TRUE, ",  "        link = 1), control.inla = list(strategy = \"gaussian\", ",  "        int.strategy = \"eb\"), control.results = list(return.marginals.random = TRUE, ",  "        return.marginals.predictor = TRUE), control.fixed = list(prec = 0.001, ",  "        prec.intercept = 1e-04), num.threads = 6)")
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          2.0992         39.2533         11.6928         53.0453 
## 
## Fixed effects:
##                 mean     sd 0.025quant  0.5quant 0.975quant      mode kld
## intercept1  -10.5238 0.1351   -10.7890  -10.5238   -10.2587  -10.5238   0
## intercept2   -0.7742 0.0118    -0.7975   -0.7742    -0.7510   -0.7742   0
## intercept3   -2.5144 0.0386    -2.5902   -2.5144    -2.4386   -2.5144   0
## S_Step1    -120.6519 1.4227  -123.4451 -120.6519  -117.8610 -120.6519   0
## S_Step2      -0.1787 0.0272    -0.2322   -0.1787    -0.1253   -0.1787   0
## S_Step3      12.8428 0.3884    12.0803   12.8428    13.6046   12.8428   0
## S_Angle1      0.0034 0.0164    -0.0289    0.0034     0.0356    0.0034   0
## S_Angle2     -0.0094 0.0117    -0.0325   -0.0094     0.0136   -0.0094   0
## S_Angle3     -0.0135 0.0321    -0.0766   -0.0135     0.0495   -0.0135   0
## 
## The model has no random effects
## 
## The model has no hyperparameters
## 
## Expected number of effective parameters(std dev): 9.108(0.00)
## Number of equivalent replicates : 11044.68 
## 
## Deviance Information Criterion (DIC) ...............: 72955.03
## Deviance Information Criterion (DIC, saturated) ....: NULL
## Effective number of parameters .....................: 9.127
## 
## Watanabe-Akaike information criterion (WAIC) ...: 72953.16
## Effective number of parameters .................: 7.254
## 
## Marginal log-Likelihood:  -36536.88 
## CPO and PIT are computed
## 
## Posterior marginals for linear predictor and fitted values computed

Compare the three-state models (the second state is weakly correlated; the other two are highly correlated).

#State 1
idat = inla.stack.index(Joint.stk, "m.1")$data
FE.df$K3.st1 = Model.k3$summary.fitted.values$mean[idat]
cor(FE.df$K3.st1, FE.df$M2_s1)
## [1] 0.8505719
#State 2
idat = inla.stack.index(Joint.stk, "m.2")$data
FE.df$K3.st2 = Model.k3$summary.fitted.values$mean[idat]
cor(FE.df$K3.st2, FE.df$M2_s2)
## [1] 0.05873862
#State 3
idat = inla.stack.index(Joint.stk, "m.3")$data
FE.df$K3.st3 = Model.k3$summary.fitted.values$mean[idat]
cor(FE.df$K3.st3, FE.df$M2_s3)
## [1] 0.6769213

4.5.3 Spatio-temporal Models

Execute the Three-State Spatio-temporal Model

pcprior1 = list(prec = list(prior="pc.prec", param = c(1, 0.001))) 
pcprior2 = list(prec = list(prior="pc.prec", param = c(3, 0.1))) 
LCPrior = list(theta=list(prior = "normal", param=c(0, 5)))
TPrior = list(theta=list(prior = "normal", param=c(0, 5)))
LCPrior2 = list(theta=list(prior = "normal", param=c(0, 3)))


h.hyper = list(theta = list(prior="pc.cor1", param=c(0, 3)))
                 
                 
Frm1 = State ~ -1 +  intercept1 + 
                     intercept2 +
                     intercept3 +
                        f(field1, 
                          model = spde) +
                        f(field2, 
                          model = spde) +
                        f(field3, 
                          model = spde) +
                        f(field1x,       
                          copy = "field1",
                          fixed = FALSE) +
                        f(field1xx,       
                          copy = "field1",
                          fixed = FALSE) +
                        f(field2x,       
                          copy = "field2",
                          fixed = FALSE) +
                        f(ID_Int,   
                           constr=TRUE,
                           model="iid", 
                           hyper=LCPrior) + 
                        f(WOY1, 
                           model="iid",
                           constr=TRUE,
                           hyper=LCPrior) +
                        f(SolHr1, 
                           model="rw1",
                           constr=TRUE,
                           scale.model = TRUE,
                           hyper=TPrior) +
                        f(WOY2, 
                           model="iid",
                           constr=TRUE,
                           hyper=LCPrior) +
                        f(SolHr2, 
                           model="rw1",
                           constr=TRUE,
                           scale.model = TRUE,
                           hyper=TPrior) +
                        f(WOY3, 
                           model="iid",
                           constr=TRUE,
                           hyper=LCPrior) +
                        f(SolHr3, 
                           model="rw1",
                           constr=TRUE,
                           scale.model = TRUE,
                           hyper=TPrior) +
                                    f(BT_step1,
                           model="ou",  
                           constr=TRUE,
                           replicate = ID_Int1,
                           hyper = h.hyper) +
                                    f(BT_step2,
                           model="ou",  
                           constr=TRUE,
                           replicate = ID_Int2,
                           hyper = h.hyper) +
                                    f(BT_step3,
                           model="ou",  
                           constr=TRUE,
                           replicate = ID_Int3,
                           hyper = h.hyper) +   
                        f(inla.group(S_Step1, 
                           n = 20,
                           method = "quantile"),         
                           model="iid",  
                           constr=TRUE,
                           hyper = LCPrior) +
                                    f(inla.group(S_Step2, 
                           n = 20,
                           method = "quantile"),         
                           model="iid",  
                           constr=TRUE,
                           hyper = LCPrior) +
                                    f(inla.group(S_Step3, 
                           n = 20,
                           method = "quantile"),         
                           model="iid",  
                           constr=TRUE,
                           hyper = LCPrior) +
                        S_Angle1 + S_Angle2 + S_Angle3 + S_wDs1 + S_wDs2 + S_wDs3

Model1 = inla(Frm1, 
             data = inla.stack.data(Joint.stk, spde=spde), 
             family = c("binomial","binomial","binomial"),
             verbose = TRUE,
             control.fixed = list(prec = 0.001, 
                                  prec.intercept = 0.0001), 
             control.predictor = list(
                                    A = inla.stack.A(Joint.stk), 
                                    compute = TRUE, 
                                    link = 1), 
             control.inla = list(strategy="gaussian", 
                                 int.strategy = "eb"),
             #control.mode = list(restart = TRUE, theta = theta.m1),
             control.results = list(return.marginals.random = TRUE,
                                    return.marginals.predictor = TRUE),
             control.compute=list(dic = TRUE, cpo = TRUE, waic = TRUE))
summary(Model1)
## 
## Call:
## c("inla(formula = Frm1, family = c(\"binomial\", \"binomial\", \"binomial\"), ",  "    data = inla.stack.data(Joint.stk, spde = spde), verbose = TRUE, ",  "    control.compute = list(dic = TRUE, cpo = TRUE, waic = TRUE), ",  "    control.predictor = list(A = inla.stack.A(Joint.stk), compute = TRUE, ",  "        link = 1), control.inla = list(strategy = \"gaussian\", ",  "        int.strategy = \"eb\"), control.results = list(return.marginals.random = TRUE, ",  "        return.marginals.predictor = TRUE), control.fixed = list(prec = 0.001, ",  "        prec.intercept = 1e-04), control.mode = list(restart = TRUE, ",  "        theta = theta.m1), num.threads = 8)")
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          7.4194      82530.8565         26.8350      82565.1109 
## 
## Fixed effects:
##               mean     sd 0.025quant 0.5quant 0.975quant    mode kld
## intercept1  0.5336 0.2314     0.0793   0.5336     0.9874  0.5336   0
## intercept2 -1.9043 0.2227    -2.3414  -1.9043    -1.4675 -1.9043   0
## intercept3 -4.6123 0.3302    -5.2605  -4.6123    -3.9646 -4.6123   0
## S_Angle1    0.0029 0.0195    -0.0353   0.0029     0.0412  0.0029   0
## S_Angle2    0.0070 0.0190    -0.0304   0.0070     0.0443  0.0070   0
## S_Angle3   -0.0950 0.0550    -0.2029  -0.0950     0.0129 -0.0950   0
## S_wDs1      0.1040 0.1167    -0.1251   0.1040     0.3329  0.1040   0
## S_wDs2     -0.1247 0.1182    -0.3568  -0.1247     0.1071 -0.1247   0
## S_wDs3      0.2619 0.1345    -0.0021   0.2619     0.5257  0.2619   0
## 
## Random effects:
## Name   Model
##  field1   SPDE2 model 
## field2   SPDE2 model 
## field3   SPDE2 model 
## ID_Int   IID model 
## WOY1   IID model 
## SolHr1   RW1 model 
## WOY2   IID model 
## SolHr2   RW1 model 
## WOY3   IID model 
## SolHr3   RW1 model 
## BT_step1   Ornstein-Uhlenbeck model 
## BT_step2   Ornstein-Uhlenbeck model 
## BT_step3   Ornstein-Uhlenbeck model 
## inla.group(S_Step1, n = 20, method = "quantile")   IID model 
## inla.group(S_Step2, n = 20, method = "quantile")   IID model 
## inla.group(S_Step3, n = 20, method = "quantile")   IID model 
## field1x   Copy 
## field1xx   Copy 
## field2x   Copy 
## 
## Model hyperparameters:
##                                                                      mean
## Range for field1                                                   0.0178
## Stdev for field1                                                   2.8419
## Range for field2                                                   6.9812
## Stdev for field2                                                   0.4933
## Range for field3                                                   0.1975
## Stdev for field3                                                   1.3679
## Precision for ID_Int                                              24.5408
## Precision for WOY1                                                 4.5672
## Precision for SolHr1                                               3.5811
## Precision for WOY2                                                 4.4966
## Precision for SolHr2                                               3.4943
## Precision for WOY3                                                 4.3298
## Precision for SolHr3                                               1.8026
## Precision for BT_step1                                         20271.8517
## Phi for BT_step1                                                  18.9777
## Precision for BT_step2                                         19832.6710
## Phi for BT_step2                                                  18.2501
## Precision for BT_step3                                         19447.3432
## Phi for BT_step3                                                  23.0106
## Precision for inla.group(S_Step1, n = 20, method = "quantile")     0.1158
## Precision for inla.group(S_Step2, n = 20, method = "quantile")     0.2457
## Precision for inla.group(S_Step3, n = 20, method = "quantile")     0.2349
## Beta for field1x                                                  -1.0293
## Beta for field1xx                                                  0.7133
## Beta for field2x                                                   0.9991
##                                                                       sd
## Range for field1                                               2.000e-03
## Stdev for field1                                               2.113e-01
## Range for field2                                               1.216e+01
## Stdev for field2                                               7.826e-01
## Range for field3                                               8.870e-02
## Stdev for field3                                               4.424e-01
## Precision for ID_Int                                           8.759e+00
## Precision for WOY1                                             1.168e+00
## Precision for SolHr1                                           1.286e+00
## Precision for WOY2                                             1.136e+00
## Precision for SolHr2                                           1.189e+00
## Precision for WOY3                                             1.313e+00
## Precision for SolHr3                                           6.537e-01
## Precision for BT_step1                                         1.953e+04
## Phi for BT_step1                                               2.621e+01
## Precision for BT_step2                                         1.902e+04
## Phi for BT_step2                                               2.509e+01
## Precision for BT_step3                                         1.876e+04
## Phi for BT_step3                                               3.443e+01
## Precision for inla.group(S_Step1, n = 20, method = "quantile") 2.550e-02
## Precision for inla.group(S_Step2, n = 20, method = "quantile") 5.460e-02
## Precision for inla.group(S_Step3, n = 20, method = "quantile") 5.170e-02
## Beta for field1x                                               4.640e-02
## Beta for field1xx                                              7.010e-02
## Beta for field2x                                               3.143e-01
##                                                                0.025quant
## Range for field1                                                   0.0142
## Stdev for field1                                                   2.4445
## Range for field2                                                   0.4741
## Stdev for field2                                                   0.0378
## Range for field3                                                   0.0806
## Stdev for field3                                                   0.7080
## Precision for ID_Int                                              10.8967
## Precision for WOY1                                                 2.6725
## Precision for SolHr1                                               1.7240
## Precision for WOY2                                                 2.5905
## Precision for SolHr2                                               1.6727
## Precision for WOY3                                                 2.3456
## Precision for SolHr3                                               0.8594
## Precision for BT_step1                                          1320.7233
## Phi for BT_step1                                                   3.7027
## Precision for BT_step2                                          1226.4244
## Phi for BT_step2                                                   3.6124
## Precision for BT_step3                                          1254.4300
## Phi for BT_step3                                                   4.1185
## Precision for inla.group(S_Step1, n = 20, method = "quantile")     0.0734
## Precision for inla.group(S_Step2, n = 20, method = "quantile")     0.1568
## Precision for inla.group(S_Step3, n = 20, method = "quantile")     0.1476
## Beta for field1x                                                  -1.1234
## Beta for field1xx                                                  0.5776
## Beta for field2x                                                   0.3810
##                                                                  0.5quant
## Range for field1                                                   0.0177
## Stdev for field1                                                   2.8360
## Range for field2                                                   3.5422
## Stdev for field2                                                   0.2663
## Range for field3                                                   0.1791
## Stdev for field3                                                   1.2985
## Precision for ID_Int                                              23.4421
## Precision for WOY1                                                 4.4343
## Precision for SolHr1                                               3.3611
## Precision for WOY2                                                 4.3942
## Precision for SolHr2                                               3.3303
## Precision for WOY3                                                 4.1295
## Precision for SolHr3                                               1.6909
## Precision for BT_step1                                         14551.8962
## Phi for BT_step1                                                  11.2715
## Precision for BT_step2                                         14247.0872
## Phi for BT_step2                                                  10.8695
## Precision for BT_step3                                         13935.6528
## Phi for BT_step3                                                  13.0907
## Precision for inla.group(S_Step1, n = 20, method = "quantile")     0.1132
## Precision for inla.group(S_Step2, n = 20, method = "quantile")     0.2395
## Precision for inla.group(S_Step3, n = 20, method = "quantile")     0.2303
## Beta for field1x                                                  -1.0280
## Beta for field1xx                                                  0.7124
## Beta for field2x                                                   0.9990
##                                                                0.975quant
## Range for field1                                                   0.0221
## Stdev for field1                                                   3.2756
## Range for field2                                                  35.6041
## Stdev for field2                                                   2.3925
## Range for field3                                                   0.4213
## Stdev for field3                                                   2.4271
## Precision for ID_Int                                              44.8289
## Precision for WOY1                                                 7.2395
## Precision for SolHr1                                               6.6947
## Precision for WOY2                                                 7.0329
## Precision for SolHr2                                               6.2800
## Precision for WOY3                                                 7.4535
## Precision for SolHr3                                               3.3879
## Precision for BT_step1                                         71879.2631
## Phi for BT_step1                                                  81.4670
## Precision for BT_step2                                         70095.2536
## Phi for BT_step2                                                  77.8418
## Precision for BT_step3                                         69084.7297
## Phi for BT_step3                                                 102.9630
## Precision for inla.group(S_Step1, n = 20, method = "quantile")     0.1734
## Precision for inla.group(S_Step2, n = 20, method = "quantile")     0.3699
## Precision for inla.group(S_Step3, n = 20, method = "quantile")     0.3493
## Beta for field1x                                                  -0.9413
## Beta for field1xx                                                  0.8534
## Beta for field2x                                                   1.6174
##                                                                     mode
## Range for field1                                                  0.0174
## Stdev for field1                                                  2.8269
## Range for field2                                                  1.2084
## Stdev for field2                                                  0.0975
## Range for field3                                                  0.1485
## Stdev for field3                                                  1.1714
## Precision for ID_Int                                             21.1531
## Precision for WOY1                                                4.1824
## Precision for SolHr1                                              2.9677
## Precision for WOY2                                                4.1998
## Precision for SolHr2                                              3.0139
## Precision for WOY3                                                3.7600
## Precision for SolHr3                                              1.4910
## Precision for BT_step1                                         3560.0381
## Phi for BT_step1                                                  5.8646
## Precision for BT_step2                                         3266.8658
## Phi for BT_step2                                                  5.6613
## Precision for BT_step3                                         3386.2206
## Phi for BT_step3                                                  6.5966
## Precision for inla.group(S_Step1, n = 20, method = "quantile")    0.1083
## Precision for inla.group(S_Step2, n = 20, method = "quantile")    0.2275
## Precision for inla.group(S_Step3, n = 20, method = "quantile")    0.2217
## Beta for field1x                                                 -1.0235
## Beta for field1xx                                                 0.7092
## Beta for field2x                                                  0.9990
## 
## Expected number of effective parameters(std dev): 620.78(0.00)
## Number of equivalent replicates : 162.05 
## 
## Deviance Information Criterion (DIC) ...............: 39613.50
## Deviance Information Criterion (DIC, saturated) ....: NULL
## Effective number of parameters .....................: 638.63
## 
## Watanabe-Akaike information criterion (WAIC) ...: 39599.62
## Effective number of parameters .................: 603.02
## 
## Marginal log-Likelihood:  -20635.71 
## CPO and PIT are computed
## 
## Posterior marginals for linear predictor and fitted values computed
GetMets(Model1)
##   Metric     Tier.1     Tier.2    Tier.3
## 1    DIC 17726.1622 18106.0507 3781.2896
## 2   WAIC 17725.4505 18098.5090 3775.6641
## 3   lCPO     0.2643     0.2699    0.0563

4.5.4 Results

Compare to HMMs

idat = inla.stack.index(Joint.stk, "m.1")$data
FE.df$S1_pr = Model1$summary.fitted.values$mean[idat]

idat = inla.stack.index(Joint.stk, "m.2")$data
FE.df$S2_pr = Model1$summary.fitted.values$mean[idat]

idat = inla.stack.index(Joint.stk, "m.3")$data
FE.df$S3_pr = Model1$summary.fitted.values$mean[idat]

FE.df$SumChk = FE.df$S1_pr + FE.df$S2_pr + FE.df$S3_pr


cor(FE.df$M2_s1, FE.df$S1_pr)
## [1] 0.8940625
cor(FE.df$M2_s2, FE.df$S2_pr)
## [1] 0.8858296
cor(FE.df$M2_s3, FE.df$S3_pr)
## [1] 0.8597772

Weekly Movement State Probability

FE.df$NState = ifelse(FE.df$S1_pr > FE.df$S2_pr & FE.df$S1_pr > FE.df$S3_pr, "State1",
                      ifelse(FE.df$S2_pr > FE.df$S1_pr & FE.df$S2_pr > FE.df$S3_pr, "State2",
                          ifelse(FE.df$S3_pr > FE.df$S1_pr & FE.df$S3_pr > FE.df$S2_pr, "State3", NA)))
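The same assignment can be written more compactly with max.col() (a sketch; unlike the nested ifelse(), exact ties are broken rather than returned as NA):

probs = FE.df[, c("S1_pr", "S2_pr", "S3_pr")]
FE.df$NState = paste0("State", max.col(probs, ties.method = "first"))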

which(is.na(FE.df$NState))
## integer(0)
unique(FE.df$NState)
## [1] "State2" "State3" "State1"
FE.df %>%
  group_by(NState) %>%
  summarise(MeanS = median(step,na.rm=T), #medians, despite the Mean* names
            MeanA = median(angle,na.rm=T),
            Water = mean(wDs,na.rm=T))
## # A tibble: 3 x 4
##   NState MeanS  MeanA Water
##   <chr>  <dbl>  <dbl> <dbl>
## 1 State1 0.678 0.0515 12.1 
## 2 State2 3.04  0.0515  8.86
## 3 State3 7.96  0.0515 13.8
Plot.df = FE.df %>% 
          select(LocWK, NState) %>%
          group_by(LocWK, NState) %>%
          summarise(Count = length(NState))
 
Plot.df1 = Plot.df %>%
           group_by(LocWK) %>%
           summarise(Sum = sum(Count))

Plot.df2 = Plot.df %>%
           group_by(NState) %>%
           summarise(StateT = sum(Count))

Plot.df$TWeek =  with(Plot.df1, 
                       Sum[match(
                            Plot.df$LocWK,
                                     LocWK)])

Plot.df$TState =  with(Plot.df2, 
                       StateT[match(
                            Plot.df$NState,
                                     NState)])


Plot.df$StateProp = Plot.df$Count/Plot.df$TState

Plot.df3 = Plot.df %>%
           group_by(LocWK) %>%
           summarise(PropT = sum(StateProp))

Plot.df$WeekProp =  with(Plot.df3, 
                       PropT[match(
                            Plot.df$LocWK,
                                     LocWK)])

Plot.df$IndState = Plot.df$StateProp/Plot.df$WeekProp

SeasRanges = data.frame(
                from = c(1, 11, 24, 36, 48), 
                to = c(11, 24, 36, 48, 52),
                Season = as.factor(c("Overwinter", "Migration", "Breeding", "Migration", "Overwinter")))

colScale = scale_fill_manual(name = "Activity", 
                             values = c("lightgray", "gray65", "gray50"), 
                             labels = c("Foraging", "Short Flight", "Long Flight"))


ggplot() +
   geom_density(data = Plot.df, 
                aes(x = LocWK, y = IndState, fill = NState), stat="identity", 
                position="stack", 
                col ="transparent") +
   colScale +
   theme_classic() +
         xlab("Week of Year") +
         ylab("Activity Proportion") +
         scale_x_continuous(breaks = seq(0, 54, 13), 
                     labels = c("1", seq(13, 52, 13)),
                     limits =c(0,52)) +
         theme(plot.title = element_text(hjust = 0.5),
                legend.position = "bottom",
                legend.title=element_text(size=22), 
                legend.text=element_text(size=18),
                strip.background = element_blank(),
                title = element_text(face="bold", size=22, hjust=0.5),
                strip.text = element_text(face="bold", size = 22),
                axis.title.y = element_text(face="bold", size = 24),
                axis.text.x = element_text(face="bold", size=16, vjust=0.5, 
                                           hjust=0.5, angle=0),
                axis.text.y = element_text(face="bold", size=20),
                axis.title.x = element_text(face="bold", size = 22)) 

Daily Movement Probability

mic.d1 = as.data.frame(Model1$summary.random$SolHr1)
names(mic.d1) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975", "mode", "kld")
mic.d1$State = "Foraging"

mic.d2 = as.data.frame(Model1$summary.random$SolHr2)
names(mic.d2) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975", "mode", "kld")
mic.d2$State = "Short Flight"

mic.d3 = as.data.frame(Model1$summary.random$SolHr3)
names(mic.d3) = c("ID", "Mean", "sd", "Q025", "Q50", "Q975", "mode", "kld")
mic.d3$State = "Long Flight"

mic.d = rbind(mic.d1, mic.d2, mic.d3)

mic.d$State = ordered(factor(mic.d$State), levels = c("Foraging", "Short Flight", "Long Flight"))

mic.d$ID2 = mic.d$ID + 5
mic.d$ID2[mic.d$ID2 == 24] = 0
mic.d$ID2[mic.d$ID2 == 25] = 1
mic.d$ID2[mic.d$ID2 == 26] = 2
mic.d$ID2[mic.d$ID2 == 27] = 3
mic.d$ID2[mic.d$ID2 == 28] = 4

mic.d$ID = mic.d$ID2
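The reassignments above amount to a wrap-around shift; modular arithmetic (a sketch, applied to the original ID values) does the same in one line:

#Equivalent to the block above: shift by 5 hours and wrap into 0-23
mic.d$ID = (mic.d$ID + 5) %% 24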

Raw.plt2 = ggplot(mic.d, aes(ID, Mean)) +
                 geom_smooth(col = "black", 
                  linetype= "solid",
                  method = "loess",
                  span = 0.9,
                  se = FALSE,
                  lwd = 1) +
        geom_smooth(data = mic.d, aes(ID, Q025), 
                    col = "grey", 
                    method = "loess",
                    span = 0.9,
                    size = 0.5,
                    se = FALSE,
                    linetype= "dashed") +
        geom_smooth(data = mic.d, aes(ID, Q975), 
                    col = "grey", 
                    method = "loess",
                    span = 0.9,
                    size = 0.5,
                    se = FALSE,
                    linetype= "dashed") +
        geom_hline(yintercept = 0,
                   linetype = "solid",
                   col = "darkgray",
                   size = 0.5) +  
                  ylab("Movement State Probability (logit)") +
                  xlab("Solar Hour") +
                  #ggtitle("Aerial Counts (COMPRECNTDEN)") +
                  theme_classic() +
                  scale_y_continuous(breaks = seq(-1, 2, 0.5)) +
                                     #limits = c(-1, 1)) +
                  scale_x_continuous(breaks = seq(0, 24, 6),
                                     labels = c(seq(0, 18, 6), "23"),
                                     limits =c(0,23)) +
                  facet_grid(rows = vars(State)) +
                  theme(plot.title = element_text(hjust = 0.5),
                        title = element_text(face="bold", size=18, hjust=0.5),
                        axis.text=element_text(size=16),
                        strip.text = element_text(face="bold", size = 20),
                        axis.title.y = element_text(face="bold", size = 20),
                        axis.text.x = element_text(face="bold", size=18, vjust=0.5, 
                                                   hjust=0.5, angle=0),
                        axis.title.x = element_text(face="bold", size = 20))

Raw.plt2