Flow flashiness and B-IBI

Author

Curtis DeGasperi

Published

May 5, 2025

I missed the recent discussions regarding the future of Cleaner Controlled Stormwater. However, I thought I would update some of the flow flashiness analyses I have presented previously. The analysis covers 36 long-term (2002-2024) flow gauging locations distributed throughout King County.

Based on the daily mean flow records, I calculated three annual metrics: High Pulse Count (HPC; the number of distinct flow pulses exceeding twice the mean flow), the Richards-Baker Index (RBI; the flow path length, i.e., the sum of the absolute day-to-day changes in flow, divided by total discharge), and TQmean (the proportion of time that flow exceeds the mean flow). I then ran non-parametric Mann-Kendall trend tests and assigned a likelihood to each trend using Bayesian credible intervals after McBride (2019).
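For reference, here is a minimal sketch of how these three metrics can be computed from a vector of daily mean flows for a single water year. The function name and the choice of twice the mean flow as the pulse threshold are illustrative assumptions, not the exact code used for this analysis:

# minimal sketch: annual flashiness metrics from a daily mean flow vector q
flashiness_metrics <- function(q, pulse_threshold = 2 * mean(q)) {
  above  <- q > pulse_threshold
  hpc    <- sum(rle(above)$values)     # HPC: number of distinct runs of days above the threshold
  rbi    <- sum(abs(diff(q))) / sum(q) # RBI: flow path length divided by total discharge
  tqmean <- mean(q > mean(q))          # TQmean: proportion of days with flow above the mean
  c(HPC = hpc, RBI = rbi, TQmean = tqmean)
}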

The results are summarized in the graph below, which shows the number of stations trending toward greater flashiness (degrading), toward less flashiness (improving), or with no clear direction (uncertain). Flashiness as measured by RBI and TQmean is generally improving, while the HPC trends are mostly uncertain, with more stations degrading than improving.
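The trend likelihoods come from a sourced helper, TDA_kendall(), that is not shown here. As a rough illustration only (not that function's actual implementation), the McBride (2019) / IPCC-style categories used in the code below can be assigned from the estimated probability that the slope is positive:

# rough illustration, not the sourced TDA_kendall() implementation:
# map the probability of a positive slope to IPCC-style likelihood categories
likelihood_category <- function(p_positive) {
  cut(p_positive,
      breaks = c(0, 0.01, 0.05, 0.10, 0.33, 0.66, 0.90, 0.95, 0.99, 1),
      labels = c("Exceptionally unlikely", "Extremely unlikely", "Very unlikely",
                 "Unlikely", "About as likely as not", "Likely",
                 "Very likely", "Extremely likely", "Virtually certain"),
      include.lowest = TRUE)
}

For HPC and RBI, where an increase means greater flashiness, a likely negative slope maps to "Improving"; for TQmean the signs flip.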

code to load, process, and plot the hydrologic metric trend summary plot
library(tidyverse) # includes readr, dplyr, ggplot2, and forcats
library(mapview)
library(leaflet)
library(leafpop)
library(sp)

source("W:/Users/UsersSTS1/degaspec/R/TDA/TDA_kendall.R")

# read the annual metric values and drop site 51T
infile2 <- readRDS('infile_new.rds') %>% filter(SITE_CODE != '51T')

# keep sites with more than 15 years of record
summary.infile2 <- infile2 %>%
  group_by(SITE_CODE, Metric) %>%
  summarize(n = n(), minYr = min(water_year), maxYr = max(water_year)) %>%
  filter(n > 15)

infile3 <- infile2 %>%
  filter(SITE_CODE %in% unique(summary.infile2$SITE_CODE)) %>%
  ungroup()

# run a Mann-Kendall trend test for every site and metric combination
res <- NULL
for (code in unique(infile3$SITE_CODE)){
  for (metric in unique(infile3$Metric)){
    tmp <- infile3 %>%
      filter(SITE_CODE == code & Metric == metric & !is.na(Value)) %>%
      select(water_year, Value, lat, lon, SITE_NAME)
    dum <- data.frame(with(tmp, TDA_kendall(water_year, Value, kendall.conf.level = 0.95)))
    dum$lat <- tmp$lat[1]
    dum$lon <- tmp$lon[1]
    dum$Site_Code <- code
    dum$Site_Name <- tmp$SITE_NAME[1]
    dum$Metric <- metric
    res <- rbind(res, dum)
  }
}

# order the nine McBride (2019) likelihood categories from most to least likely
likelihood_levels <- c("Virtually certain", "Extremely likely", "Very likely", "Likely",
                       "About as likely as not",
                       "Unlikely", "Very unlikely", "Extremely unlikely", "Exceptionally unlikely")
res$Likelihood.of.Positive.Slope <- factor(res$Likelihood.of.Positive.Slope, levels = likelihood_levels)
res$Likelihood.of.Negative.Slope <- factor(res$Likelihood.of.Negative.Slope, levels = likelihood_levels)

res$Likelihood.of.Positive.Slope <- fct_rev(res$Likelihood.of.Positive.Slope)

### collapse the nine likelihood categories to three trend classes
tda_classes <- list(Degrading = c("Unlikely", "Very unlikely", "Extremely unlikely", "Exceptionally unlikely"),
                    Uncertain = c("About as likely as not"),
                    Improving = c("Virtually certain", "Extremely likely", "Very likely", "Likely"))

res$TDA_pos <- res$Likelihood.of.Positive.Slope
levels(res$TDA_pos) <- tda_classes

res$TDA_neg <- res$Likelihood.of.Negative.Slope
levels(res$TDA_neg) <- tda_classes

res$TDA_pos <- as.character(res$TDA_pos)
res$TDA_neg <- as.character(res$TDA_neg)
res$sen.slope.neg <- abs(res$sen.slope)
res$TDA_pos <- factor(res$TDA_pos,levels=c("Degrading","Uncertain","Improving"))
res$TDA_neg <- factor(res$TDA_neg,levels=c("Degrading","Uncertain","Improving"))

res <- res %>% arrange(Metric,sen.slope)

# tally trend classes by metric: TQmean is classed on the positive slope
# (an increase is improving), HPC and RBI on the negative slope (a decrease is improving)
summary.pos <- res %>% filter(Metric == "TQmean") %>% count(Metric, TDA = as.character(TDA_pos))
summary.neg <- res %>% filter(Metric != "TQmean") %>% count(Metric, TDA = as.character(TDA_neg))

summary.res <- rbind(summary.pos, summary.neg)

summary.res$TDA <- factor(summary.res$TDA, levels = c("Degrading", "Uncertain", "Improving"))

summary.res <- summary.res %>% group_by(Metric) %>%
  mutate(Percent = 100*n/sum(n)) %>%
  ungroup()

ggplot(summary.res,aes(x=Metric,y=n,fill=TDA)) + geom_bar(stat = "identity") +
  labs(fill = "Trend Detection Assessment") + xlab("") + ylab("Number of Stations") +
  theme(legend.title = element_text(size = 12,face="bold"), legend.text = element_text(size=14),axis.text=element_text(size=12,face="bold"),
        axis.title=element_text(size=14,face="bold")) +
  scale_fill_manual(values=alpha(c("red", "gray", "green"),0.7)) +
  geom_text(aes(x=Metric, y=n, label=paste0(round(Percent,0),'%')), position=position_stack(vjust=0.5),
            color='white',size = 6)



The spatial distribution of these results can be mapped out, first for HPC. Note: click on a site symbol to view the annual values behind the trend.



code to map the spatial trend results for HPC

tmp <- filter(res, Metric == 'HPC')
mylabel <- glue::glue("{tmp$Site_Name}<br /> {tmp$Site_Code}") %>% 
  lapply(htmltools::HTML)

# convert to a SpatialPointsDataFrame and build a pop-up time-series plot for each site
coordinates(infile3) <- ~ lon + lat
plot_list <- list()
for (i in 1:nrow(tmp)) {
  site <- tmp$Site_Code[i]
  plot_list[[i]] <- ggplot(infile3@data %>% filter(Metric == 'HPC' & SITE_CODE == site),
                           aes(water_year, Value)) +
    geom_point() +
    geom_smooth() +
    ggtitle(paste0(tmp$Site_Code[i], " (", tmp$Site_Name[i], ")")) +
    labs(x = "", y = "Count")
}

mapview() + 
  mapview(tmp, zcol = "TDA_neg", xcol = "lon", ycol = "lat", crs = 4269, grid = FALSE, layer.name = 'HPC Trend',
          col.regions = c("#b82d3b","#fad24b","#33a236"), label = mylabel,
        popup = popupGraph(plot_list)) 



Then for RBI… Click on a site symbol to view the annual values behind the trend.

code to map the spatial trend results for RBI

tmp <- filter(res, Metric == 'RBI')
mylabel <- glue::glue("{tmp$Site_Name}<br /> {tmp$Site_Code}") %>% 
  lapply(htmltools::HTML)

plot_list <- list()
for(i in 1:nrow(tmp)){
  site = tmp$Site_Code[i]
  plot_list[[i]] <- ggplot(infile3@data %>% filter(Metric=='RBI'&SITE_CODE == site),
                   aes(water_year,Value)) +
    geom_point() +
    geom_smooth() + ggtitle(paste0(tmp$Site_Code[i]," (",tmp$Site_Name[i],")")) +
    labs(x="",y="Proportion")
}

mapview() + 
  mapview(tmp, zcol = "TDA_neg", xcol = "lon", ycol = "lat", crs = 4269, grid = FALSE, layer.name = 'RBI Trend',
          col.regions = c("#b82d3b","#fad24b","#33a236"), label = mylabel,
        popup = popupGraph(plot_list)) 



And then for TQmean… Click on a site symbol to view the annual values behind the trend.

code to map the spatial trend results for TQmean

tmp <- filter(res, Metric == 'TQmean')
mylabel <- glue::glue("{tmp$Site_Name}<br /> {tmp$Site_Code}") %>% 
  lapply(htmltools::HTML)

plot_list <- list()
for(i in 1:nrow(tmp)){
  site = tmp$Site_Code[i]
  plot_list[[i]] <- ggplot(infile3@data %>% filter(Metric=='TQmean'&SITE_CODE == site),
                   aes(water_year,Value)) +
    geom_point() +
    geom_smooth() + ggtitle(paste0(tmp$Site_Code[i]," (",tmp$Site_Name[i],")")) +
    labs(x="",y="Proportion")
}

mapview() + 
  mapview(tmp, zcol = "TDA_pos", xcol = "lon", ycol = "lat", crs = 4269, grid = FALSE, layer.name = 'TQmean Trend',
          col.regions = c("#b82d3b","#fad24b","#33a236"), label = mylabel,
        popup = popupGraph(plot_list)) 



We can also do something similar for B-IBI (the Benthic Index of Biotic Integrity), here for 87 locations (2002-2024).

code to load, process, and plot the B-IBI trend summary plot
df1 <- read_tsv('./data/pssb/ScoresPart1_KingCoAmbient2024.txt')
df2 <- read_tsv('./data/pssb/ScoresPart2_KingCoAmbient2024.txt')
df3 <- read_tsv('./data/pssb/Scores_Vashon2024.txt')
df4 <- read_tsv('./data/pssb/Scores_Boise2024.txt')
df5 <- read_tsv('./data/pssb/Scores_MillerWalker2024.txt')

# combine the score exports, extract the sample year, and keep one sample per site and year
dfw <- bind_rows(df1, df2, df3, df4, df5) %>%
  mutate(Year = year(as.Date(`Event Date`, format = "%m/%d/%Y")),
         Value = `Overall Score`) %>%
  distinct(`Site Code`, Year, .keep_all = TRUE)

# check for river sites and remove any present
snoq_river <- c("07LMS093027","07SFS011059","07LMS016051","07SFS009443",
                "07LMS213875","07LMS026663")
dfw <- filter(dfw,!`Site Code` %in% snoq_river)

# filter to 2002-2024 sites with no more than 3 missing years (at least 20 of 23 years sampled)
summary.f <- dfw %>% filter(Year >= 2002) %>%
  group_by(`Site ID`, WRIA, Basin, Subbasin, Stream, Latitude, Longitude, `Site Code`) %>%
  summarize(n = n(), Year_min = min(Year), Year_max = max(Year)) %>%
  filter(n >= 20) %>%
  ungroup()


# keep the 2002-2024 record at the selected sites and add five-year bins for plotting
dfw <- dfw %>% filter(`Site Code` %in% unique(summary.f$`Site Code`) & between(Year, 2002, 2024)) %>%
  mutate(Years = as.character(cut(Year, breaks = c(2000, 2005, 2010, 2015, 2020, 2025),
                                  labels = c('2005', '2010', '2015', '2020', '2025'))))

dfw <- arrange(dfw,`Site Code`,Year)
# run a Mann-Kendall trend test for each site
res <- NULL
for (code in unique(summary.f$`Site Code`)){
  tmp <- dfw %>% filter(between(Year, 2002, 2024) & `Site Code` == code & !is.na(Value))
  dum <- data.frame(with(tmp, TDA_kendall(Year, Value, kendall.conf.level = 0.95)))
  dum$Stream <- tmp$Stream[1]
  dum$`Site Code` <- code
  dum$lat <- tmp$Latitude[1]
  dum$lon <- tmp$Longitude[1]
  res <- rbind(res, dum)
}

res$Likelihood.of.Positive.Slope <- factor(res$Likelihood.of.Positive.Slope, levels = likelihood_levels)
res$Likelihood.of.Negative.Slope <- factor(res$Likelihood.of.Negative.Slope, levels = likelihood_levels)

### collapse to three classes; a higher B-IBI score is better, so an increasing trend is the improving direction
res$TDA_pos <- res$Likelihood.of.Positive.Slope
levels(res$TDA_pos) <- list(Decreasing=c("Unlikely","Very unlikely","Extremely unlikely","Exceptionally unlikely"),Uncertain=c("About as likely as not"),Increasing=c("Virtually certain","Extremely likely","Very likely","Likely"))

res$TDA_neg <- res$Likelihood.of.Negative.Slope
levels(res$TDA_neg) <- list(Increasing=c("Unlikely","Very unlikely","Extremely unlikely","Exceptionally unlikely"),Uncertain=c("About as likely as not"),Decreasing=c("Virtually certain","Extremely likely","Very likely","Likely"))

summary.pos <- res %>% count(TDA = as.character(TDA_pos)) %>%
  mutate(Metric = 'B-IBI',
         TDA = factor(TDA, levels = c("Decreasing", "Uncertain", "Increasing")),
         Percent = 100*n/nrow(res))

ggplot(summary.pos,aes(x=Metric,y=n,fill=TDA)) + geom_bar(stat = "identity") +
  labs(fill = "Trend Detection Assessment") + xlab("B-IBI") + ylab("Number of Stations") +
  theme(legend.title = element_text(size = 20,face="bold"), legend.text = element_text(size=18),axis.text=element_text(size=16,face="bold"),
        axis.title=element_text(size=18,face="bold"),strip.text.x = element_text(size = 20),
        axis.text.x=element_blank(), axis.ticks.x=element_blank()) +
  scale_fill_manual(values=alpha(c("red", "gray", "green"),0.7)) +
  guides(fill=guide_legend(title="B-IBI Trend")) +
  geom_text(aes(x=Metric, y=n, label=paste0(round(Percent,0),'%')), position=position_stack(vjust=0.5),
            color='white',size = 6)



And, of course, a map… Click on a site symbol to view the annual values behind the trend.

code to map the spatial trend results for B-IBI

mylabel <- glue::glue("{res$Stream}<br /> {res$`Site Code`}") %>% 
  lapply(htmltools::HTML)

coordinates(dfw) <- ~Longitude + Latitude

plot_list <- list()
for(i in 1:nrow(res)){
  site = res$`Site Code`[i]
  plot_list[[i]] <- ggplot(dfw@data %>% filter(`Site Code` == site),
                   aes(Year,Value)) +
    geom_point() +
    geom_smooth() + ggtitle(paste0(res$Stream[i]," (",res$`Site Code`[i],")")) +
    labs(x="",y="B-IBI Score")
}

mapview() + 
  mapview(res, zcol = "TDA_pos", xcol = "lon", ycol = "lat", crs = 4269, grid = FALSE, layer.name = 'B-IBI Trend',
          col.regions = c("#b82d3b","#fad24b","#33a236"), label = mylabel,
        popup = popupGraph(plot_list))