This practical (TP-5) continues from TP-4 (model fitting and diagnostics). Here we focus on how to turn a stock assessment into management advice.

Main learning outcomes

  1. Identify model conflict (different plausible models disagree)
  2. Combine scenarios (ensembles + weights)
  3. Run projections / forecasts under alternative TAC levels
  4. Quantify risk against reference points (B/Bmsy, F/Fmsy)
  5. Communicate results clearly (plots + key tables)

0) Load packages

library(JABBA)
library(dplyr)
library(tidyr)
library(ggplot2)

1) Load fitted models (from TP-4)

# Load models from TP-4:
load("TP4_JABBA_fits.RData")

# Put all scenarios to compare into a single list
models <- list(
  Base       = fit_base,
  NoSigmaEst = fit_no_sigma_est
)

cat("\nModels available:\n")
## 
## Models available:
print(names(models))
## [1] "Base"       "NoSigmaEst"

2) Model conflict

Model conflict means that key outputs differ across plausible scenarios.

Examples include estimated stock status (B/Bmsy, F/Fmsy), productivity parameters (r, K, MSY), and the overall biomass trajectory.

2.1) Simple model-fit metric (DIC)

# Extract the DIC from each model
models$Base$stats$Value[6]       # Base model
## [1] -387.7
models$NoSigmaEst$stats$Value[6] # Sensitivity model
## [1] -385.4
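The positional index above relies on DIC sitting in row 6 of the $stats table. A minimal sketch of a name-based extraction (assuming the table stores statistic names in a character or factor column; print it first to confirm the layout):

# Locate DIC by name instead of by position
# (assumes statistic names live in a character/factor column of $stats)
print(models$Base$stats)

get_dic <- function(fit) {
  st  <- fit$stats
  lbl <- st[[which(sapply(st, function(x) is.character(x) || is.factor(x)))[1]]]
  st$Value[grepl("DIC", lbl)]
}
sapply(models, get_dic)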

2.2) Table: compare key parameters (K, r, MSY)

key_pars <- c("K", "r", "MSY")

# Base model
tab_base <- as.data.frame(models$Base$estimates)
tab_base$Parameter <- rownames(tab_base)
tab_base <- tab_base[tab_base$Parameter %in% key_pars, ]
tab_base <- tab_base[, c("Parameter", colnames(tab_base)[1:3])]
colnames(tab_base)[2:4] <- c("Median", "LCI", "UCI")
tab_base$Model <- "Base"

# NoSigmaEst model
tab_nse <- as.data.frame(models$NoSigmaEst$estimates)
tab_nse$Parameter <- rownames(tab_nse)
tab_nse <- tab_nse[tab_nse$Parameter %in% key_pars, ]
tab_nse <- tab_nse[, c("Parameter", colnames(tab_nse)[1:3])]
colnames(tab_nse)[2:4] <- c("Median", "LCI", "UCI")
tab_nse$Model <- "NoSigmaEst"

# Combine
comp_tab <- rbind(tab_base, tab_nse)
comp_tab
##      Parameter          Median            LCI             UCI      Model
## K            K  76047.70435998 63306.57138706  98244.09118527       Base
## r            r      0.06426184     0.04744267      0.08667564       Base
## MSY        MSY   1247.92500000   854.76800000   1817.90100000       Base
## K1           K 121471.71166505 66657.78552087 164741.25992960 NoSigmaEst
## r1           r      0.06340057     0.04739772      0.08523556 NoSigmaEst
## MSY1       MSY   1805.21200000   936.15300000   3041.03300000 NoSigmaEst
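With more than two scenarios, the copy-and-paste blocks above become tedious. A minimal sketch of the same extraction wrapped in a helper (assuming each fit stores its summary table in $estimates, as above):

# Build the comparison table for any number of models at once
extract_pars <- function(fit, model_name, pars = c("K", "r", "MSY")) {
  est <- as.data.frame(fit$estimates)
  est$Parameter <- rownames(est)
  est <- est[est$Parameter %in% pars, ]
  out <- est[, c("Parameter", colnames(est)[1:3])]
  colnames(out)[2:4] <- c("Median", "LCI", "UCI")
  out$Model <- model_name
  out
}
comp_tab2 <- bind_rows(Map(extract_pars, models, names(models)))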

2.3) Plot: compare key parameters (median ± 95% CrI)

ggplot(comp_tab, aes(x = Model, y = Median, colour = Model)) +
  geom_point(size = 2) +
  geom_errorbar(aes(ymin = LCI, ymax = UCI), width = 0.15) +
  facet_wrap(~ Parameter, scales = "free_y") +
  theme_bw() +
  labs(title = "JABBA model comparison (median ± 95% CrI)",
       y = "Estimate", x = NULL) +
  theme(legend.position = "none")

2.4) Compare full trajectories

jbplot_summary(models, plotCIs = TRUE)
## 
## ><> jbplot_summary()

2.5) Kobe plots (side-by-side)

par(mfrow = c(1, 2), mar = c(4, 4, 3, 1))

jbplot_kobe(models$Base, add = TRUE)
## 
## ><> jbplot_kobe() - Stock Status Plot  <><
mtext("Base", side = 3, line = 0.5, cex = 1)

jbplot_kobe(models$NoSigmaEst, add = TRUE)
## 
## ><> jbplot_kobe() - Stock Status Plot  <><
mtext("NoSigmaEst", side = 3, line = 0.5, cex = 1)

par(mfrow = c(1, 1))

Class discussion

  • What can you tell about stock productivity in each scenario?
  • Do the models tell the same story overall?
  • Do the models agree on current status (last years)?
  • What could explain the differences? (data conflicts, priors, model structure, etc.)

3) Building an ensemble (weighting)

In real assessments you might weight scenarios using statistical criteria (e.g. DIC), expert judgement, or drop implausible runs altogether.

For this class we show two options:

  1. Equal weights (default)

  2. Manual weights (simple approximation)

# A) Equal-weight ensemble
ens_equal <- jbplot_ensemble(models, joint = TRUE, kbout = TRUE, plotCIs = TRUE)

# B) Manual weights (simple exercise for class)
w <- c(Base = 0.7, NoSigmaEst = 0.3)
w
##       Base NoSigmaEst 
##        0.7        0.3
sum(w)
## [1] 1
# Apply weights by replicating models (simple approximation)
models_weighted <- c(
  rep(list(models$Base),       7),
  rep(list(models$NoSigmaEst), 3)
)
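
# Generalisation (sketch, hypothetical helper): build the replicate list from
# any weight vector that sums to 1. With w = c(Base = 0.7, NoSigmaEst = 0.3)
# and n_rep = 10 this reproduces models_weighted above.
n_rep <- 10
models_weighted_gen <- unlist(
  Map(function(m, k) rep(list(m), k), models[names(w)], round(w * n_rep)),
  recursive = FALSE
)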

ens_weighted <- jbplot_ensemble(models_weighted, joint = TRUE, kbout = TRUE, plotCIs = TRUE)

# Compare equal vs manual ensemble (mean B/Bmsy in fit period)
ens_comp <- bind_rows(
  ens_equal    %>% mutate(Ensemble = "Equal weights"),
  ens_weighted %>% mutate(Ensemble = "Manual weights")
)

ens_comp_fit <- ens_comp %>%
  filter(type == "fit") %>%
  group_by(year, Ensemble) %>%
  summarise(stock_mean = mean(stock, na.rm = TRUE), .groups = "drop")

ggplot(ens_comp_fit, aes(year, stock_mean, colour = Ensemble)) +
  geom_line(linewidth = 1.1) +
  theme_bw() +
  labs(title = "Ensemble comparison (fit period): mean B/Bmsy",
       x = "Year", y = "Mean B/Bmsy")

4) Projections / Forecasts (fw_jabba)

We project forward under different absolute TAC values (ICCAT style).

We first calculate current catch as the mean of the last 3 years.

# choose the ensemble to project
models_proj <- models_weighted

# current catch (mean of last 3 years)
catchN <- read.csv("catchN_SMA_IOTC_2024.csv")
Ccur <- mean(tail(catchN$SMA_reported, 3), na.rm = TRUE)
Ccur
## [1] 3042.863
# projection settings
TACs      <- seq(0, 4000, by = 250)
nyears    <- 20
imp_delay <- 2

fw <- fw_jabba(
  models_proj,
  nyears = nyears,
  imp.yr = imp_delay,
  imp.values = TACs,
  quant = "Catch",
  type = "abs",
  nsq = 3,
  stochastic = TRUE,
  initial = Ccur
)

# Quick multi-panel forecast plot
jbpar(mfrow = c(2, 2))
jbplot_ensemble(fw, add = TRUE, subplots = 1, legend.loc = "topright", plotCIs = FALSE) # B/Bmsy
jbplot_ensemble(fw, add = TRUE, subplots = 2, legend = FALSE, plotCIs = FALSE)         # F/Fmsy
jbplot_ensemble(fw, add = TRUE, subplots = 5, legend = FALSE, plotCIs = TRUE)          # deviations
jbplot_ensemble(fw, add = TRUE, subplots = 6, legend = FALSE, plotCIs = FALSE)         # catch


5) Kobe II strategy matrix (KIISM)

Managers often want a strategy matrix giving, for each TAC level and year, the probability of being in the Green quadrant:

\[ P(\text{Green}) = P\left(B/B_{MSY} > 1 \;\text{and}\; F/F_{MSY} < 1\right) \]

In practice, this probability is estimated as the proportion of posterior draws (pooled across the ensemble) that fall in the green quadrant in each projection year.

# Extract ensemble output from projections
proj <- jbplot_ensemble(fw, joint = TRUE, kbout = TRUE, plotCIs = FALSE)

# last fitted year (from the "joint" run)
last_fit_year <- max(proj$year[proj$run == "joint"], na.rm = TRUE)

# keep projection years and TAC runs
proj2 <- proj %>%
  filter(run != "joint", year > last_fit_year) %>%
  mutate(
    TAC   = as.numeric(gsub("[^0-9]", "", run)),
    green = (stock > 1 & harvest < 1)
  )

# probability of green by TAC and year
p_green <- proj2 %>%
  group_by(TAC, year) %>%
  summarise(P_green = mean(green, na.rm = TRUE), .groups = "drop")

# keep every 2 years
yrs <- sort(unique(p_green$year))
yrs_keep <- yrs[seq(1, length(yrs), by = 2)]

p_green_2y <- p_green %>%
  filter(year %in% yrs_keep)

# Kobe II table
kobe2_tab <- p_green_2y %>%
  mutate(P_green = round(P_green, 2)) %>%
  pivot_wider(names_from = year, values_from = P_green) %>%
  arrange(TAC)

kobe2_tab
## # A tibble: 17 × 11
##      TAC `2023` `2025` `2027` `2029` `2031` `2033` `2035` `2037` `2039` `2041`
##    <dbl>  <dbl>  <dbl>  <dbl>  <dbl>  <dbl>  <dbl>  <dbl>  <dbl>  <dbl>  <dbl>
##  1     0      0   0.21   0.29   0.36   0.42   0.47   0.51   0.56   0.59   0.62
##  2   250      0   0.21   0.27   0.33   0.38   0.42   0.46   0.5    0.53   0.55
##  3   500      0   0.2    0.26   0.31   0.35   0.38   0.41   0.44   0.46   0.49
##  4   750      0   0.2    0.25   0.28   0.31   0.34   0.36   0.39   0.4    0.42
##  5  1000      0   0.19   0.23   0.25   0.28   0.3    0.32   0.33   0.34   0.35
##  6  1250      0   0.15   0.18   0.21   0.23   0.24   0.25   0.26   0.27   0.27
##  7  1500      0   0.11   0.13   0.15   0.16   0.17   0.18   0.19   0.19   0.19
##  8  1750      0   0.06   0.08   0.1    0.11   0.11   0.12   0.13   0.13   0.13
##  9  2000      0   0.04   0.05   0.06   0.07   0.08   0.08   0.08   0.09   0.09
## 10  2250      0   0.02   0.04   0.04   0.05   0.05   0.06   0.06   0.06   0.06
## 11  2500      0   0.02   0.02   0.03   0.03   0.04   0.04   0.04   0.04   0.04
## 12  2750      0   0.01   0.02   0.02   0.02   0.03   0.03   0.03   0.03   0.03
## 13  3000      0   0.01   0.01   0.01   0.02   0.02   0.02   0.02   0.02   0.02
## 14  3250      0   0      0.01   0.01   0.01   0.01   0.01   0.01   0.01   0.02
## 15  3500      0   0      0      0.01   0.01   0.01   0.01   0.01   0.01   0.01
## 16  3750      0   0      0      0      0.01   0.01   0.01   0.01   0.01   0.01
## 17  4000      0   0      0      0      0      0.01   0.01   0.01   0.01   0.01

Heatmap (optional)

ggplot(p_green_2y, aes(x = year, y = TAC, fill = P_green)) +
  geom_tile() +
  scale_y_reverse() +
  scale_fill_gradient(low = "lightblue", high = "darkblue") +
  theme_bw() +
  labs(title = "Kobe II Strategy Matrix: P(Green)",
       x = "Year", y = "TAC", fill = "P(Green)")


6) Simple HCR example

A Harvest Control Rule (HCR) is a pre-agreed decision rule that translates stock status into a management action (here, a multiplier on current catch).

For this class, we illustrate a very simple B-based HCR (ignoring F for simplicity):

# Current B/Bmsy from the last fitted year (ensemble)
last_fit_year <- max(ens_weighted$year[ens_weighted$type == "fit"], na.rm = TRUE)

BB_current <- ens_weighted %>%
  filter(type == "fit", year == last_fit_year) %>%
  summarise(BB = mean(stock, na.rm = TRUE)) %>%
  pull(BB)

cat("\nCurrent (mean) B/Bmsy in last fit year =", round(BB_current, 2), "\n")
## 
## Current (mean) B/Bmsy in last fit year = 0.79
# Simple HCR multiplier
mult_hcr <- ifelse(BB_current < 1.0, 0.8,
                   ifelse(BB_current < 1.2, 1.0, 1.1))

cat("HCR suggested catch multiplier =", mult_hcr, "\n")
## HCR suggested catch multiplier = 0.8
# Convert multiplier to an absolute TAC
TAC_hcr <- mult_hcr * Ccur
cat("HCR TAC (raw) =", round(TAC_hcr, 0), "\n")
## HCR TAC (raw) = 2434
# Choose closest TAC from the grid evaluated in projections
TAC_choice <- TACs[which.min(abs(TACs - TAC_hcr))]
cat("Closest TAC tested in projections =", TAC_choice, "\n")
## Closest TAC tested in projections = 2500
# Extract P(Green) over time for this TAC
hcr_kobe2 <- p_green_2y %>%
  filter(TAC == TAC_choice) %>%
  arrange(year)

cat("\nKobe II (P(Green)) for HCR TAC over time:\n")
## 
## Kobe II (P(Green)) for HCR TAC over time:
print(hcr_kobe2)
## # A tibble: 10 × 3
##      TAC  year P_green
##    <dbl> <int>   <dbl>
##  1  2500  2023 0.00236
##  2  2500  2025 0.0157 
##  3  2500  2027 0.0239 
##  4  2500  2029 0.0288 
##  5  2500  2031 0.0328 
##  6  2500  2033 0.0360 
##  7  2500  2035 0.0390 
##  8  2500  2037 0.0401 
##  9  2500  2039 0.0411 
## 10  2500  2041 0.0423
# Plot the HCR-selected strategy row
ggplot(hcr_kobe2, aes(x = year, y = P_green)) +
  geom_line(linewidth = 1.1) +
  geom_point(size = 2) +
  theme_bw() +
  labs(title = paste0("HCR-selected TAC = ", TAC_choice, " : P(Green) over time"),
       x = "Year", y = "P(Green)")


Student exercises

  1. Try different imp_delay values (0 vs 2 vs 3). What changes?
  2. Compare equal weights vs manual weights. Does advice change?
  3. Project for a longer period. What changes? What is the risk?

Group discussion

  1. Why is the HCR behaving like this?
  2. Would you (scientist) prefer to advise using KIISM/Kobe II, or a fixed HCR?
  3. If you were a fisherman, what would you prefer?

Communication: what to tell managers

  1. State the current status (B/Bmsy, F/Fmsy) + uncertainty
  2. Explain key assumptions (indices used, priors, catch history)
  3. Show the KIISM, explain probabilities, and trade-offs across TAC options
WELL DONE! You have produced your first management recommendations for a fish stock.