1 OVERVIEW

This R Markdown implements the missing tests identified in the paper outline, based on the Goldstein & Pauzner (2005) framework. These tests are critical for:

  1. Testing Strategic Complementarities: Systematic vs idiosyncratic MTM decomposition
  2. DW Crisis Window Analysis: March 8-12 and March 8-31 windows
  3. BTFP Collateral Mechanics vs Run Dynamics: MTM_BTFP × Uninsured interaction
  4. Risk 3 Bank Behavior: High MTM, low uninsured banks
  5. Intensive Margin Par Benefit: Explicit insignificance test

2 SETUP AND CONFIGURATION

2.1 Load Packages

library(tidyverse)
library(data.table)
library(lubridate)
library(fixest)
library(modelsummary)
library(kableExtra)
library(scales)
library(ggplot2)
library(patchwork)
library(broom)
library(haven)  # For state data
library(car)    # For linearHypothesis tests
library(marginaleffects)  # For marginal effects plots

cat("All packages loaded successfully.\n")
## All packages loaded successfully.

2.2 Paths and Key Dates

# ==============================================================================
# PATHS
# ==============================================================================
BASE_PATH <- "C:/Users/mferdo2/OneDrive - Louisiana State University/Finance_PhD/DW_Stigma_paper/Liquidity_project_2025"
DATA_PROC <- file.path(BASE_PATH, "01_data/processed")
OUTPUT_PATH <- file.path(BASE_PATH, "03_documentation/crisis_borrowing_result_all/additional_tests")
TABLE_PATH <- file.path(OUTPUT_PATH, "tables")
FIG_PATH <- file.path(OUTPUT_PATH, "figures")

for (path in c(TABLE_PATH, FIG_PATH)) {
  if (!dir.exists(path)) dir.create(path, recursive = TRUE)
}

# ==============================================================================
# KEY DATES - EXTENDED FOR MISSING TESTS
# ==============================================================================
DATE_MAR08 <- as.Date("2023-03-08")   # SVB troubles began
DATE_MAR10 <- as.Date("2023-03-10")   # SVB failure
DATE_MAR11 <- as.Date("2023-03-11")   # Weekend borrowing
DATE_MAR12 <- as.Date("2023-03-12")   # Signature Bank failure, BTFP announced
DATE_MAR13 <- as.Date("2023-03-13")   # BTFP operational
DATE_MAR31 <- as.Date("2023-03-31")   # High DW borrowing through this date

# Period definitions
ACUTE_START <- as.Date("2023-03-13")
ACUTE_END <- as.Date("2023-05-01")
POST_ACUTE_END <- as.Date("2023-10-31")
ARB_START <- as.Date("2023-11-01")
ARB_END <- as.Date("2024-01-24")
DW_DATA_END <- as.Date("2023-09-30")

# Baseline periods
BASELINE_MAIN <- "2022Q4"
BASELINE_ARB <- "2023Q3"

2.3 Helper Functions

# ==============================================================================
# HELPER FUNCTIONS
# ==============================================================================

# Winsorization (2.5% / 97.5%)
winsorize <- function(x, probs = c(0.025, 0.975)) {
  if (all(is.na(x))) return(x)
  q <- quantile(x, probs = probs, na.rm = TRUE, names = FALSE)
  pmax(pmin(x, q[2]), q[1])
}

# Z-score standardization
standardize_z <- function(x) {
  if (all(is.na(x))) return(x)
  (x - mean(x, na.rm = TRUE)) / sd(x, na.rm = TRUE)
}

# Safe division
safe_div <- function(num, denom, default = NA_real_) {
  ifelse(is.na(denom) | denom == 0, default, num / denom)
}

# Size categories
create_size_category <- function(assets_thousands) {
  assets_millions <- assets_thousands / 1000
  case_when(
    assets_millions >= 100000 ~ "Large (>$100B)",
    assets_millions >= 1000   ~ "Medium ($1B-$100B)",
    TRUE                      ~ "Small (<$1B)"
  )
}
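
# Illustrative unit check (an assumption worth noting): Call Report total assets
# are reported in thousands of dollars, so an input of 2,500,000 (i.e., $2.5B)
# maps to "Medium ($1B-$100B)". The same convention explains the
# total_asset * 1000 conversion used when scaling borrowing amounts later on.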

# ==============================================================================
# CUSTOM LATEX TABLE FORMATTER - PUBLICATION QUALITY
# ==============================================================================

# Save regression table with custom LaTeX formatting
save_reg_table <- function(models, filename, title_text = "", notes_text = "",
                           coef_map_use = NULL, gof_map_use = NULL, add_rows_use = NULL,
                           label_text = NULL, control_vars = NULL, ...) {
  
  # Generate label from filename if not provided
  if (is.null(label_text)) {
    label_text <- paste0("tab:", tolower(gsub("Table_", "", filename)))
  }
  
  # Default control variables (to add separator line)
  if (is.null(control_vars)) {
    control_vars <- c("Log(Assets) (z)", "Cash Ratio (z)", "Loan/Deposit (z)", 
                      "Book Equity (z)", "Wholesale (z)", "ROA (z)")
  }
  
  # HTML version (for viewing)
  modelsummary(models, stars = c('*' = 0.1, '**' = 0.05, '***' = 0.01),
               coef_map = coef_map_use, gof_map = gof_map_use, add_rows = add_rows_use,
               title = title_text, notes = notes_text,
               output = file.path(TABLE_PATH, paste0(filename, ".html")), ...)
  
  # Generate the LaTeX tabular and capture it as a character string so the
  # raw output can be post-processed below
  latex_raw <- as.character(modelsummary(models, stars = c('*' = 0.1, '**' = 0.05, '***' = 0.01),
                                         coef_map = coef_map_use, gof_map = gof_map_use, 
                                         add_rows = add_rows_use, output = "latex_tabular", ...))
  
  # Get number of models
  n_models <- length(models)
  col_spec <- paste0("@{}l", paste(rep("c", n_models), collapse = ""), "@{}")
  
  # Build model headers
  model_headers <- paste(paste0("(", seq_len(n_models), ")"), collapse = " & ")
  
  # Process the raw LaTeX to add control separator
  latex_lines <- strsplit(latex_raw, "\n")[[1]]
  
  # Find first control variable and add separator before it
  processed_lines <- c()
  control_added <- FALSE
  for (line in latex_lines) {
    # Check if this line contains the first control variable
    if (!control_added && any(sapply(control_vars, function(cv) grepl(cv, line, fixed = TRUE)))) {
      processed_lines <- c(processed_lines, "\\addlinespace[4pt]")
      processed_lines <- c(processed_lines, "\\textit{Controls} & & & & & \\\\")
      control_added <- TRUE
    }
    processed_lines <- c(processed_lines, line)
  }
  
  # Combine processed lines
  latex_body <- paste(processed_lines, collapse = "\n")
  
  # Build complete LaTeX table
  latex_complete <- paste0(
    "\\begin{table}[htbp]\n",
    "\\centering\n",
    "\\caption{", title_text, "}\n",
    "\\label{", label_text, "}\n",
    "\n",
    "\\vspace{0.5em}\n",
    "\\begin{threeparttable}\n",
    "\\small\n",
    latex_body, "\n",
    "\\begin{tablenotes}[flushleft]\n",
    "\\footnotesize\n",
    "\\item \\textit{Notes:} ", notes_text, "\n",
    "\\item $^{***}p<0.01$; $^{**}p<0.05$; $^{*}p<0.10$.\n",
    "\\end{tablenotes}\n",
    "\\end{threeparttable}\n",
    "\\end{table}"
  )
  
  # Write to file
  writeLines(latex_complete, file.path(TABLE_PATH, paste0(filename, ".tex")))
  cat("Saved:", filename, "(HTML + LaTeX)\n")
}

# Simplified table save for non-regression tables (summary stats, etc.)
save_simple_latex_table <- function(df, filename, title_text = "", notes_text = "",
                                    col_names = NULL, label_text = NULL, digits = 3) {
  
  if (is.null(label_text)) {
    label_text <- paste0("tab:", tolower(gsub("Table_", "", filename)))
  }
  
  if (is.null(col_names)) {
    col_names <- names(df)
  }
  
  # Generate LaTeX body using kable
  latex_body <- df %>%
    kable(format = "latex", 
          col.names = col_names,
          digits = digits,
          booktabs = TRUE,
          escape = FALSE) %>%
    as.character()
  
  # Build complete table
  latex_complete <- paste0(
    "\\begin{table}[htbp]\n",
    "\\centering\n",
    "\\caption{", title_text, "}\n",
    "\\label{", label_text, "}\n",
    "\n",
    "\\vspace{0.5em}\n",
    "\\begin{threeparttable}\n",
    "\\small\n",
    latex_body, "\n",
    "\\begin{tablenotes}[flushleft]\n",
    "\\footnotesize\n",
    "\\item \\textit{Notes:} ", notes_text, "\n",
    "\\end{tablenotes}\n",
    "\\end{threeparttable}\n",
    "\\end{table}"
  )
  
  writeLines(latex_complete, file.path(TABLE_PATH, paste0(filename, ".tex")))
  cat("Saved:", filename, ".tex\n")
}

# Save figure
save_figure <- function(plot_obj, filename, width = 12, height = 8) {
  ggsave(file.path(FIG_PATH, paste0(filename, ".pdf")), plot = plot_obj, 
         width = width, height = height, device = "pdf")
  ggsave(file.path(FIG_PATH, paste0(filename, ".png")), plot = plot_obj, 
         width = width, height = height, dpi = 300, device = "png")
  cat("Saved:", filename, "(PDF + PNG)\n")
}

# ==============================================================================
# STANDARD CONTROLS - DEFINED ONCE FOR CONSISTENCY ACROSS ALL ANALYSES
# ==============================================================================
CONTROLS <- "ln_assets + cash_ratio + loan_to_deposit + book_equity_ratio + wholesale + roa"

# Build formula
build_formula <- function(dv, explanatory, controls = CONTROLS) {
  as.formula(paste(dv, "~", explanatory, "+", controls))
}
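
# Example (illustrative): build_formula("btfp_acute", "mtm_total + uninsured_lev + mtm_x_uninsured")
# returns: btfp_acute ~ mtm_total + uninsured_lev + mtm_x_uninsured + ln_assets +
#   cash_ratio + loan_to_deposit + book_equity_ratio + wholesale + roa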

# Coefficient map
COEF_MAP <- c(
  "mtm_total" = "MTM Loss (z)",
  "mtm_btfp" = "MTM Loss BTFP (z)",
  "uninsured_lev" = "Uninsured Lev (z)",
  "mtm_x_uninsured" = "MTM × Uninsured",
  "mtm_total:uninsured_lev" = "MTM × Uninsured",
  "mtm_btfp:uninsured_lev" = "MTM_OMO × Uninsured",
  "mtm_btfp_x_uninsured" = "MTM_OMO × Uninsured",
  "systematic_mtm" = "Systematic MTM (z)",
  "idiosyncratic_mtm" = "Idiosyncratic MTM (z)",
  "systematic_x_uninsured" = "Systematic MTM × Uninsured",
  "idiosyncratic_x_uninsured" = "Idiosyncratic MTM × Uninsured",
  "run_risk_2" = "Risk 2: Low MTM, High Uninsured",
  "run_risk_3" = "Risk 3: High MTM, Low Uninsured",
  "run_risk_4" = "Risk 4: High MTM, High Uninsured",
  "ln_assets" = "Log(Assets) (z)",
  "cash_ratio" = "Cash Ratio (z)",
  "loan_to_deposit" = "Loan/Deposit (z)",
  "book_equity_ratio" = "Book Equity (z)",
  "wholesale" = "Wholesale (z)",
  "roa" = "ROA (z)",
  "par_benefit" = "Par Benefit (z)",
  "collateral_capacity" = "Collateral Capacity (z)",
  "uninsured_outflow" = "Uninsured Outflow (z)"
)

# GOF statistics
gof_lpm <- c("nobs", "r.squared", "adj.r.squared")
gof_logit <- c("nobs", "logLik", "AIC")

# Run 4-specification models
run_4spec_models <- function(data, dv, family_type = c("lpm","logit")) {
  family_type <- match.arg(family_type)
  forms <- list(
    "(1) Base" = build_formula(dv, "mtm_total + uninsured_lev + mtm_x_uninsured"),
    "(2) +Risk1" = build_formula(dv, "run_risk_1"),
    "(3) +Risk1,2" = build_formula(dv, "run_risk_1 + run_risk_2"),
    "(4) Risk2,3,4" = build_formula(dv, "run_risk_2 + run_risk_3 + run_risk_4")
  )
  if (family_type == "lpm") {
    lapply(forms, function(ff) feols(ff, data = data, vcov = "hetero"))
  } else {
    lapply(forms, function(ff) feglm(ff, data = data, family = binomial("logit"), vcov = "hetero"))
  }
}

# Create N rows for tables
create_n_rows <- function(data, dv, n_models = 4) {
  n_ones <- sum(data[[dv]] == 1, na.rm = TRUE)
  n_sample <- nrow(data)
  out <- data.frame(term = c(paste0("N (", dv, "=1)"), "N (Sample)"))
  for (i in 1:n_models) out[[paste0("(", i, ")")]] <- c(n_ones, n_sample)
  out
}

3 LOAD AND PREPARE DATA

# ==============================================================================
# LOAD DATA
# ==============================================================================

call_q <- read_csv(file.path(DATA_PROC, "final_call_gsib.csv"), show_col_types = FALSE) %>%
  mutate(idrssd = as.character(idrssd))

btfp_loans_raw <- read_csv(file.path(DATA_PROC, "btfp_loan_bank_only.csv"), show_col_types = FALSE) %>%
  mutate(rssd_id = as.character(rssd_id), btfp_loan_date = mdy(btfp_loan_date))

dw_loans_raw <- read_csv(file.path(DATA_PROC, "dw_loan_bank_2023.csv"), show_col_types = FALSE) %>%
  mutate(rssd_id = as.character(rssd_id), dw_loan_date = ymd(dw_loan_date))

cat("=== DATA LOADED ===\n")
## === DATA LOADED ===
cat("Call Report:", nrow(call_q), "obs |", n_distinct(call_q$idrssd), "banks\n")
## Call Report: 75989 obs | 5074 banks
cat("BTFP Loans:", nrow(btfp_loans_raw), "| DW Loans:", nrow(dw_loans_raw), "\n")
## BTFP Loans: 6734 | DW Loans: 10008
# Exclude failed banks and G-SIBs
excluded_banks <- call_q %>%
  filter(period == BASELINE_MAIN, failed_bank == 1 | gsib == 1) %>%
  pull(idrssd)

btfp_loans <- btfp_loans_raw %>% filter(!rssd_id %in% excluded_banks)
dw_loans <- dw_loans_raw %>% filter(!rssd_id %in% excluded_banks)

cat("Excluded banks (failed + G-SIBs):", length(excluded_banks), "\n")
## Excluded banks (failed + G-SIBs): 41
# ==============================================================================
# CREATE BORROWER INDICATORS FOR MISSING TIME WINDOWS
# ==============================================================================

create_borrower_indicator <- function(loans_df, date_col, id_col, amount_col,
                                       start_date, end_date, prefix) {
  loans_df %>%
    filter(!!sym(date_col) >= start_date, !!sym(date_col) <= end_date) %>%
    group_by(!!sym(id_col)) %>%
    summarise(
      "{prefix}" := 1L,
      "{prefix}_amt" := sum(!!sym(amount_col), na.rm = TRUE),
      "{prefix}_first" := min(!!sym(date_col)),
      .groups = "drop"
    ) %>%
    rename(idrssd = !!sym(id_col))
}

# ==============================================================================
# NEW DW WINDOWS FOR MISSING TESTS
# ==============================================================================

# March 8-12 (SVB troubles through Signature Bank failure/BTFP announcement)
dw_mar8_12 <- create_borrower_indicator(
  dw_loans, "dw_loan_date", "rssd_id", "dw_loan_amount",
  DATE_MAR08, DATE_MAR12, "dw_mar8_12"
)

# March 8-31 (High DW borrowing period)
dw_mar8_31 <- create_borrower_indicator(
  dw_loans, "dw_loan_date", "rssd_id", "dw_loan_amount",
  DATE_MAR08, DATE_MAR31, "dw_mar8_31"
)

# Standard periods (for comparison)
dw_mar10 <- create_borrower_indicator(
  dw_loans, "dw_loan_date", "rssd_id", "dw_loan_amount",
  DATE_MAR10, DATE_MAR10, "dw_mar10"
)

dw_acute <- create_borrower_indicator(
  dw_loans, "dw_loan_date", "rssd_id", "dw_loan_amount",
  ACUTE_START, min(ACUTE_END, DW_DATA_END), "dw_acute"
)

btfp_acute <- create_borrower_indicator(
  btfp_loans, "btfp_loan_date", "rssd_id", "btfp_loan_amount",
  ACUTE_START, ACUTE_END, "btfp_acute"
)

btfp_arb <- create_borrower_indicator(
  btfp_loans, "btfp_loan_date", "rssd_id", "btfp_loan_amount",
  ARB_START, ARB_END, "btfp_arb"
)

cat("\n=== NEW DW BORROWER COUNTS ===\n")
## 
## === NEW DW BORROWER COUNTS ===
cat("DW Mar 8-12:", nrow(dw_mar8_12), "\n")
## DW Mar 8-12: 69
cat("DW Mar 8-31:", nrow(dw_mar8_31), "\n")
## DW Mar 8-31: 298
# ==============================================================================
# CONSTRUCT ANALYSIS VARIABLES (from main_analysis.Rmd)
# ==============================================================================

construct_analysis_vars <- function(baseline_data) {
  
  baseline_data %>%
    mutate(
      # Raw variables
      mtm_total_raw = mtm_loss_to_total_asset,
      mtm_btfp_raw = mtm_loss_omo_eligible_to_total_asset,
      mtm_other_raw = mtm_loss_non_omo_eligible_to_total_asset,
      uninsured_lev_raw = uninsured_deposit_to_total_asset,
      uninsured_share_raw = uninsured_to_deposit,
      
      ln_assets_raw = log(total_asset),
      cash_ratio_raw = cash_to_total_asset,
      book_equity_ratio_raw = book_equity_to_total_asset,
      roa_raw = roa,
      loan_to_deposit_raw = loan_to_deposit,
      wholesale_raw = safe_div(
        fed_fund_purchase + repo + replace_na(other_borrowed_less_than_1yr, 0),
        total_liability, 0
      ) * 100,
      
      # Deposit outflows
      uninsured_outflow_raw = change_uninsured_fwd_q,
      insured_outflow_raw = change_insured_deposit_fwd_q,
      total_outflow_raw = change_total_deposit_fwd_q,
      
      # Size category
      size_cat = create_size_category(total_asset),
      size_cat = factor(size_cat, levels = c("Small (<$1B)", "Medium ($1B-$100B)", "Large (>$100B)")),
      
      # Winsorized
      mtm_total_w = winsorize(mtm_total_raw),
      mtm_btfp_w = winsorize(mtm_btfp_raw),
      mtm_other_w = winsorize(mtm_other_raw),
      uninsured_lev_w = winsorize(uninsured_lev_raw),
      ln_assets_w = winsorize(ln_assets_raw),
      cash_ratio_w = winsorize(cash_ratio_raw),
      book_equity_ratio_w = winsorize(book_equity_ratio_raw),
      roa_w = winsorize(roa_raw),
      loan_to_deposit_w = winsorize(loan_to_deposit_raw),
      wholesale_w = winsorize(wholesale_raw),
      uninsured_outflow_w = winsorize(uninsured_outflow_raw),
      
      # Z-standardized
      mtm_total = standardize_z(mtm_total_w),
      mtm_btfp = standardize_z(mtm_btfp_w),
      mtm_other = standardize_z(mtm_other_w),
      uninsured_lev = standardize_z(uninsured_lev_w),
      ln_assets = standardize_z(ln_assets_w),
      cash_ratio = standardize_z(cash_ratio_w),
      book_equity_ratio = standardize_z(book_equity_ratio_w),
      roa = standardize_z(roa_w),
      loan_to_deposit = standardize_z(loan_to_deposit_w),
      wholesale = standardize_z(wholesale_w),
      uninsured_outflow = standardize_z(uninsured_outflow_w),
      
      # Interactions
      mtm_x_uninsured = mtm_total * uninsured_lev,
      mtm_btfp_x_uninsured = mtm_btfp * uninsured_lev,
      
      # Medians for risk categories
      median_mtm_used = median(mtm_total_w, na.rm = TRUE),
      median_uninsured_used = median(uninsured_lev_w, na.rm = TRUE)
    )
}

# Add run risk dummies
add_run_risk_dummies <- function(data) {
  data %>%
    mutate(
      run_risk_1 = as.integer(mtm_total_w <= median_mtm_used & uninsured_lev_w <= median_uninsured_used),
      run_risk_2 = as.integer(mtm_total_w <= median_mtm_used & uninsured_lev_w > median_uninsured_used),
      run_risk_3 = as.integer(mtm_total_w > median_mtm_used & uninsured_lev_w <= median_uninsured_used),
      run_risk_4 = as.integer(mtm_total_w > median_mtm_used & uninsured_lev_w > median_uninsured_used),
      run_risk_cat = case_when(
        run_risk_1 == 1 ~ "Risk 1",
        run_risk_2 == 1 ~ "Risk 2",
        run_risk_3 == 1 ~ "Risk 3",
        run_risk_4 == 1 ~ "Risk 4",
        TRUE ~ NA_character_
      )
    )
}

# Create baseline datasets
df_2022q4 <- call_q %>%
  filter(period == BASELINE_MAIN, !idrssd %in% excluded_banks,
         !is.na(omo_eligible) & omo_eligible > 0) %>%
  construct_analysis_vars() %>%
  add_run_risk_dummies()

df_2023q3 <- call_q %>%
  filter(period == BASELINE_ARB, !idrssd %in% excluded_banks,
         !is.na(omo_eligible) & omo_eligible > 0) %>%
  construct_analysis_vars() %>%
  add_run_risk_dummies()

cat("\n=== BASELINE DATASETS ===\n")
## 
## === BASELINE DATASETS ===
cat("2022Q4:", nrow(df_2022q4), "banks\n")
## 2022Q4: 4292 banks
cat("2023Q3:", nrow(df_2023q3), "banks\n")
## 2023Q3: 4214 banks
# ==============================================================================
# JOIN BORROWER INDICATORS
# ==============================================================================

# Helper function to join borrowers
join_borrowers <- function(df, btfp_df, dw_df, btfp_var, dw_var) {
  df %>%
    left_join(btfp_df %>% select(idrssd, starts_with(btfp_var)), by = "idrssd") %>%
    left_join(dw_df %>% select(idrssd, starts_with(dw_var)), by = "idrssd") %>%
    mutate(
      "{btfp_var}" := replace_na(!!sym(btfp_var), 0L),
      "{dw_var}" := replace_na(!!sym(dw_var), 0L),
      fhlb_user = as.integer(abnormal_fhlb_borrowing_10pct == 1),
      any_fed = as.integer(!!sym(btfp_var) == 1 | !!sym(dw_var) == 1),
      non_user = as.integer(!!sym(btfp_var) == 0 & !!sym(dw_var) == 0 & fhlb_user == 0)
    )
}

# Create acute dataset with intensive margin vars
df_acute <- join_borrowers(df_2022q4, btfp_acute, dw_acute, "btfp_acute", "dw_acute") %>%
  mutate(
    btfp_pct = ifelse(btfp_acute == 1 & btfp_acute_amt > 0, 
                      100 * btfp_acute_amt / (total_asset * 1000), NA_real_),
    dw_pct = ifelse(dw_acute == 1 & dw_acute_amt > 0, 
                    100 * dw_acute_amt / (total_asset * 1000), NA_real_)
  )

# Create arbitrage dataset
df_arb <- df_2023q3 %>%
  left_join(btfp_arb %>% select(idrssd, btfp_arb, btfp_arb_amt), by = "idrssd") %>%
  mutate(
    btfp_arb = replace_na(btfp_arb, 0L),
    fhlb_user = as.integer(abnormal_fhlb_borrowing_10pct == 1),
    non_user = as.integer(btfp_arb == 0 & fhlb_user == 0)
  )

# Create new DW window datasets
df_mar8_12 <- df_2022q4 %>%
  left_join(dw_mar8_12 %>% select(idrssd, dw_mar8_12, dw_mar8_12_amt), by = "idrssd") %>%
  mutate(
    dw_mar8_12 = replace_na(dw_mar8_12, 0L),
    fhlb_user = as.integer(abnormal_fhlb_borrowing_10pct == 1),
    non_user = as.integer(dw_mar8_12 == 0 & fhlb_user == 0)
  )

df_mar8_31 <- df_2022q4 %>%
  left_join(dw_mar8_31 %>% select(idrssd, dw_mar8_31, dw_mar8_31_amt), by = "idrssd") %>%
  mutate(
    dw_mar8_31 = replace_na(dw_mar8_31, 0L),
    fhlb_user = as.integer(abnormal_fhlb_borrowing_10pct == 1),
    non_user = as.integer(dw_mar8_31 == 0 & fhlb_user == 0)
  )

cat("\n=== DATASET SUMMARY ===\n")
## 
## === DATASET SUMMARY ===
cat("Acute: BTFP=", sum(df_acute$btfp_acute), "| DW=", sum(df_acute$dw_acute), "\n")
## Acute: BTFP= 462 | DW= 393
cat("Mar 8-12: DW=", sum(df_mar8_12$dw_mar8_12), "\n")
## Mar 8-12: DW= 66
cat("Mar 8-31: DW=", sum(df_mar8_31$dw_mar8_31), "\n")
## Mar 8-31: DW= 280
cat("Arbitrage: BTFP=", sum(df_arb$btfp_arb), "\n")
## Arbitrage: BTFP= 766

4 TEST 1: DW REGRESSIONS - MARCH 8-12 WINDOW

Rationale: SVB's troubles began on March 8, the bank failed on March 10, and weekend borrowing followed on March 11-12. Signature Bank was closed and the BTFP was announced on March 12. This window therefore captures DW usage before the BTFP was available.

# ==============================================================================
# TEST 1: DW MARCH 8-12 REGRESSIONS
# ==============================================================================

cat("\n", paste(rep("=", 80), collapse = ""), "\n")

================================================================================

cat("TEST 1: DW MARCH 8-12 WINDOW (SVB Troubles through BTFP Announcement)\n")

TEST 1: DW MARCH 8-12 WINDOW (SVB Troubles through BTFP Announcement)

cat(paste(rep("=", 80), collapse = ""), "\n")

================================================================================

# Create clean sample: DW users vs pure non-users
df_dw_mar8_12_s <- df_mar8_12 %>%
  filter(dw_mar8_12 == 1 | non_user == 1)

cat("\nSample for DW Mar 8-12 analysis:\n")

Sample for DW Mar 8-12 analysis:

cat("  DW users:", sum(df_dw_mar8_12_s$dw_mar8_12), "\n")

DW users: 66

cat("  Pure non-users:", sum(df_dw_mar8_12_s$non_user), "\n")

Pure non-users: 3931

cat("  Total:", nrow(df_dw_mar8_12_s), "\n")

Total: 3997

# Run 4-spec models
if (sum(df_dw_mar8_12_s$dw_mar8_12) >= 5) {
  m_dw_mar8_12 <- run_4spec_models(df_dw_mar8_12_s, "dw_mar8_12", "lpm")
  n_dw_mar8_12 <- create_n_rows(df_dw_mar8_12_s, "dw_mar8_12")
  
  # 1. Save the table FIRST (does not print to screen)
  save_reg_table(m_dw_mar8_12, "Table_DW_Mar8_12_LPM",
                 title_text = "Discount Window Usage During Pre-BTFP Crisis Window (March 8--12, 2023)",
                 notes_text = "This table reports Linear Probability Model (LPM) estimates...",
                 coef_map_use = COEF_MAP, gof_map_use = gof_lpm, add_rows_use = n_dw_mar8_12)

  # 2. Display the table LAST (renders in the document)
  modelsummary(
    m_dw_mar8_12,
    stars = c('*' = 0.1, '**' = 0.05, '***' = 0.01),
    coef_map = COEF_MAP, gof_map = gof_lpm, add_rows = n_dw_mar8_12,
    title = "Table: DW Usage - March 8-12, 2023 (SVB Troubles through BTFP Announcement)",
    output = "kableExtra"
  ) %>% kable_styling(bootstrap_options = c("striped", "hover"), font_size = 9)
  
} else {
  cat("\nInsufficient DW users for March 8-12 regression.\n")
}
Saved: Table_DW_Mar8_12_LPM (HTML + LaTeX)
Table: DW Usage - March 8-12, 2023 (SVB Troubles through BTFP Announcement)

|                                  | (1) Base         | (2) +Risk1       | (3) +Risk1,2     | (4) Risk2,3,4    |
|----------------------------------|------------------|------------------|------------------|------------------|
| MTM Loss (z)                     | −0.000 (0.003)   |                  |                  |                  |
| Uninsured Lev (z)                | 0.002 (0.002)    |                  |                  |                  |
| MTM × Uninsured                  | 0.001 (0.002)    |                  |                  |                  |
| Risk 2: Low MTM, High Uninsured  |                  |                  | −0.001 (0.006)   | −0.001 (0.005)   |
| Risk 3: High MTM, Low Uninsured  |                  |                  |                  | −0.003 (0.005)   |
| Risk 4: High MTM, High Uninsured |                  |                  |                  | 0.005 (0.007)    |
| Log(Assets) (z)                  | 0.016*** (0.003) | 0.017*** (0.003) | 0.017*** (0.004) | 0.016*** (0.003) |
| Cash Ratio (z)                   | −0.003* (0.002)  | −0.003* (0.002)  | −0.003 (0.002)   | −0.003* (0.002)  |
| Loan/Deposit (z)                 | −0.001 (0.002)   | −0.001 (0.002)   | −0.001 (0.002)   | −0.001 (0.002)   |
| Book Equity (z)                  | −0.000 (0.002)   | −0.001 (0.001)   | −0.001 (0.001)   | −0.000 (0.001)   |
| Wholesale (z)                    | 0.011*** (0.003) | 0.011*** (0.003) | 0.011*** (0.003) | 0.011*** (0.003) |
| ROA (z)                          | −0.003* (0.002)  | −0.003 (0.002)   | −0.002 (0.002)   | −0.003* (0.002)  |
| Num.Obs.                         | 3987             | 3993             | 3987             | 3987             |
| R2                               | 0.028            | 0.028            | 0.028            | 0.029            |
| R2 Adj.                          | 0.026            | 0.026            | 0.026            | 0.026            |
| N (dw_mar8_12=1)                 | 66               | 66               | 66               | 66               |
| N (Sample)                       | 3997             | 3997             | 3997             | 3997             |

Coefficients reported with robust standard errors in parentheses. * p < 0.1, ** p < 0.05, *** p < 0.01

5 TEST 2: DW REGRESSIONS - MARCH 8-31 WINDOW

Rationale: High DW borrowing continued through March 31 (see the daily borrowing figure). First Republic stabilized temporarily in April before failing on May 1.

# ==============================================================================
# TEST 2: DW MARCH 8-31 REGRESSIONS
# ==============================================================================

cat("\n", paste(rep("=", 80), collapse = ""), "\n")

================================================================================

cat("TEST 2: DW MARCH 8-31 WINDOW (High DW Borrowing Period)\n")

TEST 2: DW MARCH 8-31 WINDOW (High DW Borrowing Period)

cat(paste(rep("=", 80), collapse = ""), "\n")

================================================================================

# Create clean sample
df_dw_mar8_31_s <- df_mar8_31 %>%
  filter(dw_mar8_31 == 1 | non_user == 1)

cat("\nSample for DW Mar 8-31 analysis:\n")

Sample for DW Mar 8-31 analysis:

cat("  DW users:", sum(df_dw_mar8_31_s$dw_mar8_31), "\n")

DW users: 280

cat("  Pure non-users:", sum(df_dw_mar8_31_s$non_user), "\n")

Pure non-users: 3734

cat("  Total:", nrow(df_dw_mar8_31_s), "\n")

Total: 4014

# Run 4-spec models
if (sum(df_dw_mar8_31_s$dw_mar8_31) >= 5) {
  m_dw_mar8_31 <- run_4spec_models(df_dw_mar8_31_s, "dw_mar8_31", "lpm")
  n_dw_mar8_31 <- create_n_rows(df_dw_mar8_31_s, "dw_mar8_31")
  
  # 1. Save the table FIRST (internal action, no output)
  save_reg_table(m_dw_mar8_31, "Table_DW_Mar8_31_LPM",
                 title_text = "Discount Window Usage During Extended Crisis Window (March 8--31, 2023)",
                 notes_text = "This table reports Linear Probability Model (LPM) estimates of Discount Window borrowing during the extended crisis window. The dependent variable equals one if the bank accessed the Discount Window between March 8--31, 2023. This period captures sustained high DW borrowing before First Republic's temporary stabilization in April. Column (1) reports the base specification with continuous MTM loss and uninsured leverage measures plus their interaction. Columns (2)--(4) introduce risk category dummies based on median splits. The sample consists of DW users versus pure non-users (banks with no BTFP, DW, or abnormal FHLB borrowing), excluding G-SIBs and failed institutions. Bank characteristics are from 2022Q4 Call Reports. All continuous variables are winsorized at 2.5/97.5 percentiles and z-standardized. Heteroskedasticity-robust standard errors in parentheses.",
                 coef_map_use = COEF_MAP, gof_map_use = gof_lpm, add_rows_use = n_dw_mar8_31)

  # 2. Display the table LAST (renders in viewer)
  modelsummary(
    m_dw_mar8_31,
    stars = c('*' = 0.1, '**' = 0.05, '***' = 0.01),
    coef_map = COEF_MAP, gof_map = gof_lpm, add_rows = n_dw_mar8_31,
    title = "Table: DW Usage - March 8-31, 2023 (High DW Borrowing Period)",
    output = "kableExtra"
  ) %>% kable_styling(bootstrap_options = c("striped", "hover"), font_size = 9)
  
} else {
  cat("\nInsufficient DW users for March 8-31 regression.\n")
}
Saved: Table_DW_Mar8_31_LPM (HTML + LaTeX)
Table: DW Usage - March 8-31, 2023 (High DW Borrowing Period)

|                                  | (1) Base         | (2) +Risk1       | (3) +Risk1,2     | (4) Risk2,3,4    |
|----------------------------------|------------------|------------------|------------------|------------------|
| MTM Loss (z)                     | 0.004 (0.005)    |                  |                  |                  |
| Uninsured Lev (z)                | 0.009* (0.005)   |                  |                  |                  |
| MTM × Uninsured                  | 0.003 (0.004)    |                  |                  |                  |
| Risk 2: Low MTM, High Uninsured  |                  |                  | −0.008 (0.011)   | 0.008 (0.011)    |
| Risk 3: High MTM, Low Uninsured  |                  |                  |                  | 0.009 (0.010)    |
| Risk 4: High MTM, High Uninsured |                  |                  |                  | 0.025** (0.012)  |
| Log(Assets) (z)                  | 0.057*** (0.006) | 0.059*** (0.006) | 0.060*** (0.006) | 0.058*** (0.006) |
| Cash Ratio (z)                   | −0.001 (0.004)   | −0.001 (0.004)   | 0.000 (0.004)    | −0.000 (0.004)   |
| Loan/Deposit (z)                 | −0.001 (0.005)   | −0.002 (0.005)   | −0.002 (0.005)   | −0.001 (0.005)   |
| Book Equity (z)                  | 0.002 (0.004)    | −0.000 (0.003)   | 0.000 (0.003)    | 0.001 (0.003)    |
| Wholesale (z)                    | 0.013*** (0.005) | 0.012*** (0.005) | 0.012*** (0.005) | 0.012*** (0.005) |
| ROA (z)                          | −0.003 (0.004)   | −0.002 (0.004)   | −0.002 (0.004)   | −0.002 (0.004)   |
| Num.Obs.                         | 4004             | 4010             | 4004             | 4004             |
| R2                               | 0.061            | 0.061            | 0.061            | 0.061            |
| R2 Adj.                          | 0.059            | 0.059            | 0.059            | 0.059            |
| N (dw_mar8_31=1)                 | 280              | 280              | 280              | 280              |
| N (Sample)                       | 4014             | 4014             | 4014             | 4014             |

Coefficients reported with robust standard errors in parentheses. * p < 0.1, ** p < 0.05, *** p < 0.01

6 TEST 3: SYSTEMATIC VS IDIOSYNCRATIC MTM DECOMPOSITION

Goldstein Framework: Strategic complementarities should be higher for fragile banks when the shock is systematic. We test:

\[\text{BTFP}_i = \beta_1 \text{Systematic\_MTM}_i + \beta_2 \text{Idiosyncratic\_MTM}_i + \beta_3 \text{Uninsured}_i + \beta_4 (\text{Systematic} \times \text{Uninsured})_i + \beta_5 (\text{Idiosyncratic} \times \text{Uninsured})_i + \varepsilon_i\]

Prediction: \(\beta_4 > \beta_5\) (systematic shocks amplified more by liquidity mismatch)
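
A complementary check of this directional prediction, beyond the two-sided Wald test of equality reported below, is a one-sided test on the coefficient difference. A minimal sketch (assuming a fitted model m whose interaction terms are named systematic_x_uninsured and idiosyncratic_x_uninsured, as in the specifications estimated below):

# One-sided test of H0: beta_sys <= beta_idio against H1: beta_sys > beta_idio,
# using the coefficient difference and the robust variance-covariance matrix
b <- coef(m)
V <- vcov(m)
diff_est <- b["systematic_x_uninsured"] - b["idiosyncratic_x_uninsured"]
diff_se  <- sqrt(V["systematic_x_uninsured", "systematic_x_uninsured"] +
                 V["idiosyncratic_x_uninsured", "idiosyncratic_x_uninsured"] -
                 2 * V["systematic_x_uninsured", "idiosyncratic_x_uninsured"])
z_stat <- diff_est / diff_se
p_one_sided <- pnorm(z_stat, lower.tail = FALSE)  # small p supports beta_sys > beta_idio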

6.1 Peer Averages by Size

# ==============================================================================
# TEST 3A: SYSTEMATIC VS IDIOSYNCRATIC MTM - BY SIZE PEERS
# ==============================================================================

cat("\n", paste(rep("=", 80), collapse = ""), "\n")
## 
##  ================================================================================
cat("TEST 3A: SYSTEMATIC VS IDIOSYNCRATIC MTM DECOMPOSITION (SIZE PEERS)\n")
## TEST 3A: SYSTEMATIC VS IDIOSYNCRATIC MTM DECOMPOSITION (SIZE PEERS)
cat(paste(rep("=", 80), collapse = ""), "\n")
## ================================================================================
# Calculate peer average MTM by size category (exclude own bank)
df_systematic_size <- df_acute %>%
  group_by(size_cat) %>%
  mutate(
    # Peer average (exclude self): sum minus self, divided by n-1
    peer_count = n(),
    peer_sum_mtm = sum(mtm_total_w, na.rm = TRUE),
    systematic_mtm_raw = ifelse(peer_count > 1,
                            (peer_sum_mtm - coalesce(mtm_total_w, 0)) / (peer_count - 1),
                            NA_real_),
    idiosyncratic_mtm_raw = mtm_total_w - systematic_mtm_raw
  ) %>%
  ungroup() %>%
  mutate(
    # Winsorize and z-score
    systematic_mtm_w = winsorize(systematic_mtm_raw),
    idiosyncratic_mtm_w = winsorize(idiosyncratic_mtm_raw),
    systematic_mtm = standardize_z(systematic_mtm_w),
    idiosyncratic_mtm = standardize_z(idiosyncratic_mtm_w),
    # Interactions
    systematic_x_uninsured = systematic_mtm * uninsured_lev,
    idiosyncratic_x_uninsured = idiosyncratic_mtm * uninsured_lev
  )

# Filter for BTFP users vs pure non-users
df_sys_size_btfp <- df_systematic_size %>%
  filter(btfp_acute == 1 | non_user == 1)

cat("\nSystematic MTM Summary (by size):\n")
## 
## Systematic MTM Summary (by size):
cat("  Mean Systematic:", round(mean(df_sys_size_btfp$systematic_mtm, na.rm = TRUE), 3), "\n")
##   Mean Systematic: -0.041
cat("  Mean Idiosyncratic:", round(mean(df_sys_size_btfp$idiosyncratic_mtm, na.rm = TRUE), 3), "\n")
##   Mean Idiosyncratic: -0.016
cat("  Correlation:", round(cor(df_sys_size_btfp$systematic_mtm, df_sys_size_btfp$idiosyncratic_mtm, use = "complete"), 3), "\n")
##   Correlation: -0.446
# Build models
f_sys1 <- as.formula(paste("btfp_acute ~ systematic_mtm + idiosyncratic_mtm + uninsured_lev +", CONTROLS))
f_sys2 <- as.formula(paste("btfp_acute ~ systematic_mtm + idiosyncratic_mtm + uninsured_lev + systematic_x_uninsured + idiosyncratic_x_uninsured +", CONTROLS))
f_sys3 <- as.formula(paste("btfp_acute ~ systematic_mtm + idiosyncratic_mtm + uninsured_lev + systematic_x_uninsured +", CONTROLS))
f_sys4 <- as.formula(paste("btfp_acute ~ systematic_mtm + idiosyncratic_mtm + uninsured_lev + idiosyncratic_x_uninsured +", CONTROLS))

models_sys_size <- list(
  "(1) Main Effects" = feols(f_sys1, data = df_sys_size_btfp, vcov = "hetero"),
  "(2) Both Interactions" = feols(f_sys2, data = df_sys_size_btfp, vcov = "hetero"),
  "(3) Sys×Unins Only" = feols(f_sys3, data = df_sys_size_btfp, vcov = "hetero"),
  "(4) Idio×Unins Only" = feols(f_sys4, data = df_sys_size_btfp, vcov = "hetero")
)

n_sys_size <- data.frame(
  term = c("N (BTFP=1)", "N (Sample)"),
  `(1) Main Effects` = c(sum(df_sys_size_btfp$btfp_acute), nrow(df_sys_size_btfp)),
  `(2) Both Interactions` = c(sum(df_sys_size_btfp$btfp_acute), nrow(df_sys_size_btfp)),
  `(3) Sys×Unins Only` = c(sum(df_sys_size_btfp$btfp_acute), nrow(df_sys_size_btfp)),
  `(4) Idio×Unins Only` = c(sum(df_sys_size_btfp$btfp_acute), nrow(df_sys_size_btfp)),
  check.names = FALSE
)

modelsummary(
  models_sys_size,
  stars = c('*' = 0.1, '**' = 0.05, '***' = 0.01),
  coef_map = COEF_MAP, gof_map = gof_lpm, add_rows = n_sys_size,
  title = "Table: Systematic vs Idiosyncratic MTM - Size Peer Averages",
  output = "kableExtra"
) %>% kable_styling(bootstrap_options = c("striped", "hover"), font_size = 9)
Table: Systematic vs Idiosyncratic MTM - Size Peer Averages

|                                  | (1) Main Effects  | (2) Both Interactions | (3) Sys×Unins Only | (4) Idio×Unins Only |
|----------------------------------|-------------------|-----------------------|--------------------|---------------------|
| Uninsured Lev (z)                | 0.015** (0.006)   | 0.019*** (0.007)      | 0.016** (0.006)    | 0.020*** (0.007)    |
| Systematic MTM (z)               | −0.007 (0.010)    | −0.011 (0.011)        | −0.005 (0.011)     | −0.005 (0.010)      |
| Idiosyncratic MTM (z)            | 0.017** (0.007)   | 0.020*** (0.007)      | 0.018** (0.007)    | 0.022*** (0.007)    |
| Systematic MTM × Uninsured       |                   | 0.008 (0.007)         | −0.002 (0.006)     |                     |
| Idiosyncratic MTM × Uninsured    |                   | 0.022*** (0.006)      |                    | 0.018*** (0.005)    |
| Log(Assets) (z)                  | 0.072*** (0.009)  | 0.073*** (0.009)      | 0.071*** (0.009)   | 0.070*** (0.009)    |
| Cash Ratio (z)                   | −0.026*** (0.005) | −0.024*** (0.005)     | −0.025*** (0.005)  | −0.024*** (0.005)   |
| Loan/Deposit (z)                 | −0.012** (0.006)  | −0.009 (0.006)        | −0.011** (0.006)   | −0.008 (0.005)      |
| Book Equity (z)                  | −0.012*** (0.004) | −0.011** (0.004)      | −0.012*** (0.004)  | −0.011** (0.004)    |
| Wholesale (z)                    | 0.026*** (0.006)  | 0.025*** (0.006)      | 0.026*** (0.006)   | 0.025*** (0.006)    |
| ROA (z)                          | −0.003 (0.005)    | −0.005 (0.005)        | −0.003 (0.005)     | −0.005 (0.005)      |
| Num.Obs.                         | 3737              | 3737                  | 3737               | 3737                |
| R2                               | 0.087             | 0.091                 | 0.087              | 0.090               |
| R2 Adj.                          | 0.085             | 0.088                 | 0.085              | 0.088               |
| N (BTFP=1)                       | 462               | 462                   | 462                | 462                 |
| N (Sample)                       | 3747              | 3747                  | 3747               | 3747                |

Coefficients reported with robust standard errors in parentheses. * p < 0.1, ** p < 0.05, *** p < 0.01
# Save
save_reg_table(models_sys_size, "Table_Systematic_Idiosyncratic_Size",
               title_text = "Systematic vs. Idiosyncratic MTM Decomposition (Size Peer Averages)",
               notes_text = "This table reports Linear Probability Model (LPM) estimates decomposing MTM losses into systematic and idiosyncratic components using size-based peer groups. The dependent variable equals one if the bank accessed BTFP during the acute crisis period (March 13--May 1, 2023). Systematic MTM is the leave-one-out peer average MTM loss within size category (Small $<$\\$1B, Medium \\$1B--\\$100B, Large $>$\\$100B). Idiosyncratic MTM is bank's total MTM minus the systematic component. Following Goldstein \\& Pauzner (2005), strategic complementarities predict that the Systematic $\\times$ Uninsured interaction should exceed the Idiosyncratic $\\times$ Uninsured interaction. The sample consists of BTFP users versus pure non-users, excluding G-SIBs and failed institutions. Bank characteristics are from 2022Q4 Call Reports. All continuous variables are winsorized at 2.5/97.5 percentiles and z-standardized. Heteroskedasticity-robust standard errors in parentheses.")
## Saved: Table_Systematic_Idiosyncratic_Size (HTML + LaTeX)
# ==============================================================================
# FORMAL HYPOTHESIS TEST (Based on Test 3A, Model 2)
# ==============================================================================

cat("\n--- Formal Wald Test: Systematic vs. Idiosyncratic (Size Peers) ---\n")
## 
## --- Formal Wald Test: Systematic vs. Idiosyncratic (Size Peers) ---
# Extract Model 2 ("(2) Both Interactions") from the model list
m_both <- models_sys_size[["(2) Both Interactions"]]

# Get coefficients
beta_sys <- coef(m_both)["systematic_x_uninsured"]
beta_idio <- coef(m_both)["idiosyncratic_x_uninsured"]

cat("Beta (Systematic x Uninsured): ", round(beta_sys, 4), "\n")
## Beta (Systematic x Uninsured):  0.008
cat("Beta (Idiosyncratic x Uninsured):", round(beta_idio, 4), "\n")
## Beta (Idiosyncratic x Uninsured): 0.0219
# Calculate Ratio
if(!is.na(beta_idio) && beta_idio != 0) {
    cat("Ratio (Sys / Idio):               ", round(beta_sys / beta_idio, 2), "x\n")
}
## Ratio (Sys / Idio):                0.36 x
# Run the test: H0: Systematic = Idiosyncratic
test_result <- car::linearHypothesis(m_both, "systematic_x_uninsured = idiosyncratic_x_uninsured")
print(test_result)
## Linear hypothesis test
## 
## Hypothesis:
## systematic_x_uninsured - idiosyncratic_x_uninsured = 0
## 
## Model 1: restricted model
## Model 2: btfp_acute ~ systematic_mtm + idiosyncratic_mtm + uninsured_lev + 
##     systematic_x_uninsured + idiosyncratic_x_uninsured + ln_assets + 
##     cash_ratio + loan_to_deposit + book_equity_ratio + wholesale + 
##     roa
## 
##   Res.Df Df Chisq Pr(>Chisq)  
## 1   3726                      
## 2   3725  1  4.61      0.032 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# ==============================================================================
# COEFFICIENT COMPARISON (BETA APPROACH)
# ==============================================================================

# 1. Extract coefficients and confidence intervals from Model 2
tidy_m2 <- tidy(m_both, conf.int = TRUE, conf.level = 0.95) %>%
  filter(term %in% c("systematic_x_uninsured", "idiosyncratic_x_uninsured")) %>%
  mutate(
    term_label = case_when(
      term == "systematic_x_uninsured" ~ "Systematic Risk\n(Market-Wide Duration)",
      term == "idiosyncratic_x_uninsured" ~ "Idiosyncratic Risk\n(Bank-Specific Weakness)"
    )
  )

# 2. Plot
p_coef <- ggplot(tidy_m2, aes(x = term_label, y = estimate, color = term_label)) +
  geom_hline(yintercept = 0, linetype = "dashed", color = "gray50") +
  geom_point(size = 4) +
  geom_errorbar(aes(ymin = conf.low, ymax = conf.high), width = 0.2, size = 1) +
  scale_color_manual(values = c("#B2182B", "#2166AC")) + # Red for Idio, Blue for Sys
  labs(
    title = "Investor Discipline vs. Panic: Which Risk Triggered Runs?",
    subtitle = "Comparing the interaction effect of Uninsured Leverage with Systematic vs. Idiosyncratic MTM",
    y = "Interaction Coefficient (Impact on BTFP Usage)",
    x = NULL,
    caption = "Note: Error bars represent 95% Confidence Intervals. \nIdiosyncratic risk interaction is statistically significant and larger (p=0.05 Wald test)."
  ) +
  theme_minimal(base_size = 14) +
  theme(
    legend.position = "none",
    plot.title = element_text(face = "bold"),
    axis.text.x = element_text(face = "bold")
  )

print(p_coef)

ggsave(file.path(FIG_PATH, "Figure_Beta_Decomposition_Coef.pdf"), p_coef, width = 8, height = 6)
# ==============================================================================
# FIGURE: MARGINAL EFFECTS (INTERACTION VISUALIZATION)
# ==============================================================================

# 1. Calculate predictions
# We look at the effect of Uninsured Leverage at High (+1 SD) vs Low (-1 SD) levels of risk
pred_idio <- predictions(
  m_both, 
  newdata = datagrid(
    uninsured_lev = seq(-2, 2, length.out = 20),
    idiosyncratic_mtm = c(-1, 1), # Low vs High Idiosyncratic
    systematic_mtm = 0            # Hold Systematic Constant
  )
) %>% mutate(Risk_Type = "Idiosyncratic MTM", Level = ifelse(idiosyncratic_mtm == 1, "High Risk (+1 SD)", "Low Risk (-1 SD)"))

pred_sys <- predictions(
  m_both, 
  newdata = datagrid(
    uninsured_lev = seq(-2, 2, length.out = 20),
    systematic_mtm = c(-1, 1),    # Low vs High Systematic
    idiosyncratic_mtm = 0         # Hold Idiosyncratic Constant
  )
) %>% mutate(Risk_Type = "Systematic MTM", Level = ifelse(systematic_mtm == 1, "High Risk (+1 SD)", "Low Risk (-1 SD)"))

# 2. Combine Data
plot_data <- bind_rows(pred_idio, pred_sys)

# 3. Plot
p_interact <- ggplot(plot_data, aes(x = uninsured_lev, y = estimate, color = Level, fill = Level)) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = 0.2, color = NA) +
  geom_line(size = 1.2) +
  facet_wrap(~Risk_Type) +
  scale_color_manual(values = c("#D6604D", "#4393C3")) +
  scale_fill_manual(values = c("#D6604D", "#4393C3")) +
  labs(
    title = "Depositor Sensitivity to Uninsured Leverage",
    subtitle = "Depositors only punish leverage when Idiosyncratic risk is high (Left).\nThey largely ignore leverage when risk is Systematic/Market-wide (Right).",
    x = "Uninsured Leverage (Z-Score)",
    y = "Predicted Probability of BTFP Usage",
    color = "Risk Level", fill = "Risk Level"
  ) +
  theme_light(base_size = 14) +
  theme(
    legend.position = "bottom",
    strip.text = element_text(face = "bold", size = 12)
  )

print(p_interact)

ggsave(file.path(FIG_PATH, "Figure_Interaction_Slopes.pdf"), p_interact, width = 10, height = 6)

6.2 Peer Averages by Region

# ==============================================================================
# TEST 3B: SYSTEMATIC VS IDIOSYNCRATIC MTM - BY REGION PEERS
# ==============================================================================

cat("\n", paste(rep("=", 80), collapse = ""), "\n")
## 
##  ================================================================================
cat("TEST 3B: SYSTEMATIC VS IDIOSYNCRATIC MTM DECOMPOSITION (REGION PEERS)\n")
## TEST 3B: SYSTEMATIC VS IDIOSYNCRATIC MTM DECOMPOSITION (REGION PEERS)
cat(paste(rep("=", 80), collapse = ""), "\n")
## ================================================================================
# Define regions based on state (from paper outline)
region_mapping <- tribble(
  ~state, ~region,
  # West/Tech (SVB home region)
  "CA", "West_Tech", "OR", "West_Tech", "WA", "West_Tech",
  # Northeast Financial (Signature, First Republic)
  "NY", "Northeast_Financial", "NJ", "Northeast_Financial", "CT", "Northeast_Financial",
  # Northeast Traditional
  "MA", "Northeast_Traditional", "NH", "Northeast_Traditional", "RI", "Northeast_Traditional",
  "VT", "Northeast_Traditional", "ME", "Northeast_Traditional", "PA", "Northeast_Traditional",
  "DE", "Northeast_Traditional",
  # Southeast Growth
  "FL", "Southeast_Growth", "NC", "Southeast_Growth", "SC", "Southeast_Growth",
  "GA", "Southeast_Growth", "TN", "Southeast_Growth",
  # Southeast Traditional
  "AL", "Southeast_Traditional", "MS", "Southeast_Traditional", "LA", "Southeast_Traditional",
  "AR", "Southeast_Traditional", "KY", "Southeast_Traditional", "WV", "Southeast_Traditional",
  # Mid-Atlantic
  "MD", "Mid_Atlantic", "VA", "Mid_Atlantic", "DC", "Mid_Atlantic",
  # Midwest Industrial
  "OH", "Midwest_Industrial", "MI", "Midwest_Industrial", "IN", "Midwest_Industrial",
  "WI", "Midwest_Industrial", "IL", "Midwest_Industrial",
  # Midwest Agricultural
  "IA", "Midwest_Agricultural", "MN", "Midwest_Agricultural", "ND", "Midwest_Agricultural",
  "SD", "Midwest_Agricultural", "NE", "Midwest_Agricultural", "KS", "Midwest_Agricultural",
  "MO", "Midwest_Agricultural",
  # Energy Belt
  "TX", "Energy_Belt", "OK", "Energy_Belt", "NM", "Energy_Belt", "WY", "Energy_Belt",
  # Mountain West
  "CO", "Mountain_West", "UT", "Mountain_West", "AZ", "Mountain_West",
  "NV", "Mountain_West", "MT", "Mountain_West", "ID", "Mountain_West",
  # Alaska/Hawaii
  "AK", "Alaska_Hawaii", "HI", "Alaska_Hawaii"
)

# Check if state variable exists in data
if ("state" %in% names(df_acute)) {
  state_var <- "state"
} else if ("state" %in% names(df_acute)) {
  state_var <- "state"
} else {
  # Try to get from call_q
  if ("state" %in% names(call_q)) {
    df_acute <- df_acute %>%
      left_join(call_q %>% filter(period == BASELINE_MAIN) %>% select(idrssd, state), by = "idrssd")
    state_var <- "state"
  } else {
    state_var <- NULL
  }
}

if (!is.null(state_var)) {
  df_systematic_region <- df_acute %>%
    left_join(region_mapping, by = setNames("state", state_var)) %>%
    mutate(region = coalesce(region, "Other")) %>%
    group_by(region) %>%
    mutate(
      peer_count = n(),
      peer_sum_mtm = sum(mtm_total_w, na.rm = TRUE),
      systematic_mtm_raw = (peer_sum_mtm - coalesce(mtm_total_w, 0)) / (peer_count - 1),
      idiosyncratic_mtm_raw = mtm_total_w - systematic_mtm_raw
    ) %>%
    ungroup() %>%
    mutate(
      systematic_mtm_w = winsorize(systematic_mtm_raw),
      idiosyncratic_mtm_w = winsorize(idiosyncratic_mtm_raw),
      systematic_mtm = standardize_z(systematic_mtm_w),
      idiosyncratic_mtm = standardize_z(idiosyncratic_mtm_w),
      systematic_x_uninsured = systematic_mtm * uninsured_lev,
      idiosyncratic_x_uninsured = idiosyncratic_mtm * uninsured_lev
    )
  
  # Filter for BTFP users vs pure non-users
  df_sys_region_btfp <- df_systematic_region %>%
    filter(btfp_acute == 1 | non_user == 1)
  
  cat("\nRegion Distribution:\n")
  print(table(df_sys_region_btfp$region))
  
  # Build models
  models_sys_region <- list(
    "(1) Main Effects" = feols(f_sys1, data = df_sys_region_btfp, vcov = "hetero"),
    "(2) Both Interactions" = feols(f_sys2, data = df_sys_region_btfp, vcov = "hetero"),
    "(3) Sys×Unins Only" = feols(f_sys3, data = df_sys_region_btfp, vcov = "hetero"),
    "(4) Idio×Unins Only" = feols(f_sys4, data = df_sys_region_btfp, vcov = "hetero")
  )
  
  n_sys_region <- data.frame(
    term = c("N (BTFP=1)", "N (Sample)"),
    `(1) Main Effects` = c(sum(df_sys_region_btfp$btfp_acute), nrow(df_sys_region_btfp)),
    `(2) Both Interactions` = c(sum(df_sys_region_btfp$btfp_acute), nrow(df_sys_region_btfp)),
    `(3) Sys×Unins Only` = c(sum(df_sys_region_btfp$btfp_acute), nrow(df_sys_region_btfp)),
    `(4) Idio×Unins Only` = c(sum(df_sys_region_btfp$btfp_acute), nrow(df_sys_region_btfp)),
    check.names = FALSE
  )
  
  # 1. Save table FIRST (internal, no output)
  save_reg_table(models_sys_region, "Table_Systematic_Idiosyncratic_Region",
                 title_text = "Systematic vs Idiosyncratic MTM Decomposition (Region Peers)",
                 notes_text = "This table reports Linear Probability Model (LPM) estimates decomposing MTM losses into systematic and idiosyncratic components using geographic region peer groups. The dependent variable equals one if the bank accessed BTFP during the acute crisis period (March 13--May 1, 2023). Systematic MTM is the leave-one-out peer average MTM loss within geographic region. Regions include: West/Tech (CA, OR, WA -- SVB region), Northeast Financial (NY, NJ, CT -- Signature, First Republic region), Southeast, Midwest, and Southwest. Idiosyncratic MTM is bank's total MTM minus the systematic component. Following Goldstein \\& Pauzner (2005), systematic shocks should have larger effects on strategic complementarities because they provide common knowledge signals about bank health. The sample consists of BTFP users versus pure non-users, excluding G-SIBs and failed institutions (SVB, Signature, First Republic, Silvergate). Bank characteristics are from 2022Q4 Call Reports. All continuous variables are winsorized at 2.5/97.5 percentiles and z-standardized. Heteroskedasticity-robust standard errors in parentheses.",
                 coef_map_use = COEF_MAP, gof_map_use = gof_lpm, add_rows_use = n_sys_region)

  # 2. Display table LAST (renders in viewer)
  modelsummary(
    models_sys_region,
    stars = c('*' = 0.1, '**' = 0.05, '***' = 0.01),
    coef_map = COEF_MAP, gof_map = gof_lpm, add_rows = n_sys_region,
    title = "Table: Systematic vs Idiosyncratic MTM - Region Peer Averages",
    output = "kableExtra"
  ) %>% kable_styling(bootstrap_options = c("striped", "hover"), font_size = 9)

} else {
  cat("\nState variable not found - skipping regional analysis.\n")
  cat("Add 'state' or 'state' column to call report data for regional peer analysis.\n")
}
## 
## Region Distribution:
## 
##         Alaska_Hawaii           Energy_Belt          Mid_Atlantic 
##                     9                   508                    78 
##  Midwest_Agricultural    Midwest_Industrial         Mountain_West 
##                   998                   699                   139 
##   Northeast_Financial Northeast_Traditional                 Other 
##                   150                   228                     4 
##      Southeast_Growth Southeast_Traditional             West_Tech 
##                   362                   443                   129 
## Saved: Table_Systematic_Idiosyncratic_Region (HTML + LaTeX)
Table: Systematic vs Idiosyncratic MTM - Region Peer Averages

|                                  | (1) Main Effects  | (2) Both Interactions | (3) Sys×Unins Only | (4) Idio×Unins Only |
|----------------------------------|-------------------|-----------------------|--------------------|---------------------|
| Uninsured Lev (z)                | 0.015** (0.006)   | 0.020*** (0.007)      | 0.015** (0.006)    | 0.019*** (0.007)    |
| Systematic MTM (z)               | 0.002 (0.006)     | 0.003 (0.007)         | 0.002 (0.007)      | 0.003 (0.006)       |
| Idiosyncratic MTM (z)            | 0.020*** (0.005)  | 0.023*** (0.006)      | 0.020*** (0.005)   | 0.023*** (0.006)    |
| Systematic MTM × Uninsured       |                   | 0.001 (0.006)         | 0.000 (0.006)      |                     |
| Idiosyncratic MTM × Uninsured    |                   | 0.018*** (0.005)      |                    | 0.018*** (0.005)    |
| Log(Assets) (z)                  | 0.068*** (0.008)  | 0.068*** (0.008)      | 0.068*** (0.008)   | 0.068*** (0.008)    |
| Cash Ratio (z)                   | −0.026*** (0.005) | −0.024*** (0.005)     | −0.026*** (0.005)  | −0.024*** (0.005)   |
| Loan/Deposit (z)                 | −0.011** (0.006)  | −0.008 (0.005)        | −0.011** (0.006)   | −0.008 (0.005)      |
| Book Equity (z)                  | −0.012*** (0.004) | −0.011** (0.004)      | −0.012*** (0.004)  | −0.011** (0.004)    |
| Wholesale (z)                    | 0.025*** (0.006)  | 0.025*** (0.006)      | 0.025*** (0.006)   | 0.025*** (0.006)    |
| ROA (z)                          | −0.003 (0.005)    | −0.005 (0.005)        | −0.003 (0.005)     | −0.005 (0.005)      |
| Num.Obs.                         | 3737              | 3737                  | 3737               | 3737                |
| R2                               | 0.087             | 0.090                 | 0.087              | 0.090               |
| R2 Adj.                          | 0.085             | 0.087                 | 0.085              | 0.088               |
| N (BTFP=1)                       | 462               | 462                   | 462                | 462                 |
| N (Sample)                       | 3747              | 3747                  | 3747               | 3747                |

Coefficients reported with robust standard errors in parentheses. * p < 0.1, ** p < 0.05, *** p < 0.01

6.3 Systematic MTM Comparison Plot

# ==============================================================================
# VISUALIZATION: SYSTEMATIC VS IDIOSYNCRATIC COEFFICIENTS
# ==============================================================================

# Extract coefficients for comparison
coef_comparison <- bind_rows(
  # Size peers
  tibble(
    Model = "Size Peers",
    Term = c("Systematic × Uninsured", "Idiosyncratic × Uninsured"),
    Estimate = c(
      coef(models_sys_size[[2]])["systematic_x_uninsured"],
      coef(models_sys_size[[2]])["idiosyncratic_x_uninsured"]
    ),
    SE = c(
      sqrt(vcov(models_sys_size[[2]])["systematic_x_uninsured", "systematic_x_uninsured"]),
      sqrt(vcov(models_sys_size[[2]])["idiosyncratic_x_uninsured", "idiosyncratic_x_uninsured"])
    )
  )
)

# Add region if available
if (exists("models_sys_region")) {
  coef_comparison <- bind_rows(
    coef_comparison,
    tibble(
      Model = "Region Peers",
      Term = c("Systematic × Uninsured", "Idiosyncratic × Uninsured"),
      Estimate = c(
        coef(models_sys_region[[2]])["systematic_x_uninsured"],
        coef(models_sys_region[[2]])["idiosyncratic_x_uninsured"]
      ),
      SE = c(
        sqrt(vcov(models_sys_region[[2]])["systematic_x_uninsured", "systematic_x_uninsured"]),
        sqrt(vcov(models_sys_region[[2]])["idiosyncratic_x_uninsured", "idiosyncratic_x_uninsured"])
      )
    )
  )
}

p_sys <- ggplot(coef_comparison, aes(x = Term, y = Estimate, fill = Model)) +
  geom_bar(stat = "identity", position = position_dodge(width = 0.8), width = 0.7) +
  geom_errorbar(aes(ymin = Estimate - 1.96 * SE, ymax = Estimate + 1.96 * SE),
                position = position_dodge(width = 0.8), width = 0.25) +
  geom_hline(yintercept = 0, linetype = "dashed", color = "gray50") +
  scale_fill_manual(values = c("Size Peers" = "#2166AC", "Region Peers" = "#B2182B")) +
  labs(
    title = "Goldstein Test: Systematic vs Idiosyncratic MTM Interactions",
    subtitle = "Strategic complementarities should be stronger for systematic shocks",
    x = NULL,
    y = "Coefficient Estimate (LPM)",
    caption = "Note: Error bars represent 95% CI. Goldstein (2005) predicts Systematic × Uninsured > Idiosyncratic × Uninsured."
  ) +
  theme_minimal(base_size = 12) +
  theme(legend.position = "bottom")

print(p_sys)

save_figure(p_sys, "Figure_Systematic_vs_Idiosyncratic", width = 10, height = 6)
## Saved: Figure_Systematic_vs_Idiosyncratic (PDF + PNG)

7 TEST: HISTORICAL BETA ESTIMATION

# ==============================================================================
# PART 1: CORRECTED HISTORICAL BETA ESTIMATION
# ==============================================================================
# MTM_loss_it = α_i + β_i × Δ(Fed Funds Rate)_t + ε_it
# This estimates ASSET-SIDE interest rate sensitivity (duration exposure),
# NOT deposit beta (liability-side pass-through).
# Systematic MTM = β̂ᵢ × (Actual rate change during crisis)
# Idiosyncratic MTM = Total MTM - Systematic MTM
# ==============================================================================

cat("\n", paste(rep("=", 80), collapse = ""), "\n")

================================================================================

cat("SYSTEMATIC VS IDIOSYNCRATIC MTM: TRUE MTM BETA ESTIMATION\n")

SYSTEMATIC VS IDIOSYNCRATIC MTM: TRUE MTM BETA ESTIMATION

cat(paste(rep("=", 80), collapse = ""), "\n")

================================================================================

cat("\nMethodology: MTM_loss_it = α_i + β_i × Δ(Fed Funds Rate)_t + ε_it\n")

Methodology: MTM_loss_it = α_i + β_i × Δ(Fed Funds Rate)_t + ε_it

cat("This captures bank-specific ASSET-SIDE interest rate sensitivity.\n")

This captures bank-specific ASSET-SIDE interest rate sensitivity.

# ============================================================================
# STEP 1: Prepare Fed Funds Rate Data (Quarterly)
# ============================================================================

indices_path <- file.path(BASE_PATH, "01_data/raw/other/Indices_with_abs.csv")

indices_df <- read_csv(indices_path, show_col_types = FALSE) %>%
  mutate(Dates = mdy(Dates)) %>%
  arrange(Dates)

# Compute quarterly average Fed Funds rate
ff_quarterly <- indices_df %>%
  mutate(
    year = year(Dates),
    quarter = quarter(Dates),
    period = paste0(year, "Q", quarter)
  ) %>%
  group_by(period, year, quarter) %>%
  summarise(
    fed_funds_rate = mean(fedfunds, na.rm = TRUE),
    .groups = "drop"
  ) %>%
  arrange(year, quarter) %>%
  mutate(
    # Quarter index for consecutive check
    quarter_idx = year * 4 + quarter,
    # Compute ΔFF (change from previous quarter)
    delta_ff = fed_funds_rate - lag(fed_funds_rate),
    # Only valid if consecutive quarters
    prev_idx = lag(quarter_idx),
    is_consecutive = (quarter_idx - prev_idx) == 1
  ) %>%
  mutate(
    delta_ff = if_else(is_consecutive, delta_ff, NA_real_)
  ) %>%
  select(period, year, quarter, fed_funds_rate, delta_ff)

cat("\n=== FED FUNDS RATE BY QUARTER ===\n")

=== FED FUNDS RATE BY QUARTER ===

print(ff_quarterly %>% filter(year >= 2021))

# A tibble: 14 × 5
   period   year quarter fed_funds_rate delta_ff
 1 2022Q1   2022       1          0.124  NA
 2 2022Q2   2022       2          0.772   0.649
 3 2022Q3   2022       3          2.19    1.42
 4 2022Q4   2022       4          3.66    1.46
 5 2023Q1   2023       1          4.52    0.86
 6 2023Q2   2023       2          4.99    0.473
 7 2023Q3   2023       3          5.26    0.270
 8 2023Q4   2023       4          5.33    0.0694
 9 2024Q1   2024       1          5.33    0
10 2024Q2   2024       2          5.33    0
11 2024Q3   2024       3          5.27   -0.0606
12 2024Q4   2024       4          4.65   -0.617
13 2025Q1   2025       1          4.33   -0.322
14 2025Q2   2025       2          4.33    0

# ============================================================================
# STEP 2: Prepare Panel Data for Beta Estimation
# ============================================================================
# Use call_q which has MTM loss data across quarters
# Estimation window: 2021Q4 - 2023Q4 (pre-crisis + crisis periods)

cat("\n=== PREPARING PANEL DATA FOR BETA ESTIMATION ===\n")

=== PREPARING PANEL DATA FOR BETA ESTIMATION ===

# Merge call report data with Fed Funds changes
df_panel_beta <- call_q %>%
  filter(!idrssd %in% excluded_banks) %>%
  filter(period %in% c("2021Q4", "2022Q1", "2022Q2", "2022Q3", "2022Q4", 
                       "2023Q1", "2023Q2", "2023Q3", "2023Q4")) %>%
  select(idrssd, period, mtm_loss_to_total_asset, total_asset) %>%
  left_join(ff_quarterly %>% select(period, delta_ff), by = "period") %>%
  # Ensure we have valid observations
  filter(!is.na(mtm_loss_to_total_asset), !is.na(delta_ff)) %>%
  # Convert idrssd to character for consistency
  mutate(idrssd = as.character(idrssd))

cat("Panel observations:", nrow(df_panel_beta), "\n")

Panel observations: 32560

cat("Unique banks:", n_distinct(df_panel_beta$idrssd), "\n")

Unique banks: 4762

cat("Periods:", paste(unique(df_panel_beta$period), collapse = ", "), "\n")

Periods: 2022Q2, 2022Q3, 2022Q4, 2023Q1, 2023Q2, 2023Q3, 2023Q4

# ============================================================================
# STEP 3: Estimate Bank-Specific MTM Betas
# ============================================================================
# For each bank: MTM_loss = α + β × Δ(Fed Funds) + ε
# Require minimum 4 observations per bank for stable estimation

cat("\n=== ESTIMATING BANK-SPECIFIC MTM BETAS ===\n")

=== ESTIMATING BANK-SPECIFIC MTM BETAS ===

mtm_beta_df <- df_panel_beta %>%
  group_by(idrssd) %>%
  filter(n() >= 4) %>%  # Require at least 4 quarters
  summarise(
    n_obs = n(),
    # Run regression: MTM_loss ~ delta_ff
    mtm_beta = tryCatch({
      mod <- lm(mtm_loss_to_total_asset ~ delta_ff, data = cur_data())
      coef(mod)["delta_ff"]
    }, error = function(e) NA_real_),
    # Also get R-squared for diagnostic
    mtm_r2 = tryCatch({
      mod <- lm(mtm_loss_to_total_asset ~ delta_ff, data = cur_data())
      summary(mod)$r.squared
    }, error = function(e) NA_real_),
    # Mean MTM for reference
    mean_mtm = mean(mtm_loss_to_total_asset, na.rm = TRUE),
    .groups = "drop"
  ) %>%
  # Filter out failed estimations and extreme outliers
  filter(!is.na(mtm_beta), is.finite(mtm_beta)) %>%
  # Winsorize extreme betas (|beta| > 10 is implausible)
  mutate(
    mtm_beta_raw = mtm_beta,
    mtm_beta = pmax(pmin(mtm_beta, 10), -10)
  )

cat("Banks with valid MTM beta:", nrow(mtm_beta_df), "\n")

Banks with valid MTM beta: 4649

cat("\nMTM Beta Distribution:\n")

MTM Beta Distribution:

cat("  Mean:", round(mean(mtm_beta_df$mtm_beta), 4), "\n")

Mean: 0.1197

cat("  Median:", round(median(mtm_beta_df$mtm_beta), 4), "\n")

Median: 0.0962

cat("  SD:", round(sd(mtm_beta_df$mtm_beta), 4), "\n")

SD: 0.3698

cat("  Min:", round(min(mtm_beta_df$mtm_beta), 4), "\n")

Min: -10

cat("  Max:", round(max(mtm_beta_df$mtm_beta), 4), "\n")

Max: 2.681

cat("  Mean R²:", round(mean(mtm_beta_df$mtm_r2, na.rm = TRUE), 3), "\n")

Mean R²: 0.33

# Interpretation: Positive beta means MTM LOSS increases when rates rise
# (which is expected for banks with long-duration assets)
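# Worked example (illustrative numbers, not estimates from the data): a bank with
# beta-hat = 0.5 facing the 4.49 pp cumulative rate increase computed in Step 4 below
# would have a predicted systematic MTM loss of roughly 0.5 * 4.49 ≈ 2.2% of assets.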

# ============================================================================
# STEP 4: Compute Actual Rate Change During Crisis
# ============================================================================

crisis_start <- as.Date("2022-03-16")  # First Fed rate hike
crisis_end   <- as.Date("2023-03-10")  # SVB failure

ff_crisis <- indices_df %>%
  filter(Dates >= crisis_start, Dates <= crisis_end, !is.na(fedfunds))

ff_start <- ff_crisis %>% filter(Dates == min(Dates)) %>% pull(fedfunds) %>% first()
ff_end   <- ff_crisis %>% filter(Dates == max(Dates)) %>% pull(fedfunds) %>% first()

actual_rate_change <- ff_end - ff_start

cat("\n=== CRISIS PERIOD RATE CHANGE ===\n")

=== CRISIS PERIOD RATE CHANGE ===

cat("Period:", as.character(crisis_start), "to", as.character(crisis_end), "\n")

Period: 2022-03-16 to 2023-03-10

cat("Fed Funds start:", ff_start, "%\n")

Fed Funds start: 0.08 %

cat("Fed Funds end:", ff_end, "%\n")

Fed Funds end: 4.57 %

cat("Total change:", actual_rate_change, "percentage points\n")

Total change: 4.49 percentage points

# ============================================================================
# STEP 5: Construct Systematic vs Idiosyncratic MTM
# ============================================================================

cat("\n=== CONSTRUCTING SYSTEMATIC/IDIOSYNCRATIC DECOMPOSITION ===\n")

=== CONSTRUCTING SYSTEMATIC/IDIOSYNCRATIC DECOMPOSITION ===

df_sys_mtm <- df_acute %>%
  mutate(idrssd = as.character(idrssd)) %>%
  left_join(mtm_beta_df %>% select(idrssd, mtm_beta, mtm_r2, n_obs), by = "idrssd") %>%
  mutate(
    # Systematic MTM = β̂ × Δ(Fed Funds during crisis)
    # Interpretation: MTM loss predicted by bank's historical rate sensitivity
    systematic_mtm_raw = mtm_beta * actual_rate_change,
    
    # Idiosyncratic MTM = Total MTM - Systematic MTM
    # Interpretation: Unexplained portion (bank-specific shocks, composition effects)
    idiosyncratic_mtm_raw = mtm_total_raw - systematic_mtm_raw,
    
    # Winsorize
    systematic_mtm_w = winsorize(systematic_mtm_raw),
    idiosyncratic_mtm_w = winsorize(idiosyncratic_mtm_raw),
    
    # Z-standardize
    systematic_mtm = standardize_z(systematic_mtm_w),
    idiosyncratic_mtm = standardize_z(idiosyncratic_mtm_w),
    
    # Interactions with Uninsured Leverage
    systematic_x_uninsured = systematic_mtm * uninsured_lev,
    idiosyncratic_x_uninsured = idiosyncratic_mtm * uninsured_lev
  )

# Check coverage
cat("Banks with MTM beta:", sum(!is.na(df_sys_mtm$mtm_beta)), "/", nrow(df_sys_mtm), "\n")

Banks with MTM beta: 4253 / 4292

cat("Banks with valid decomposition:", sum(!is.na(df_sys_mtm$systematic_mtm) & 
                                            !is.na(df_sys_mtm$idiosyncratic_mtm)), "\n")

Banks with valid decomposition: 4253

# Correlation check
cor_sys_idio <- cor(df_sys_mtm$systematic_mtm, df_sys_mtm$idiosyncratic_mtm, 
                    use = "complete.obs")
cat("\nCorrelation (Systematic, Idiosyncratic):", round(cor_sys_idio, 3), "\n")

Correlation (Systematic, Idiosyncratic): -0.439

cat("(Low correlation suggests good decomposition)\n")

(Low correlation suggests good decomposition)
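# Sanity check (a minimal sketch; decomp_gap is an illustrative name): by construction
# idiosyncratic_mtm_raw = mtm_total_raw - systematic_mtm_raw, so any non-zero residual
# here would flag a merge or variable-definition problem upstream.
decomp_gap <- with(df_sys_mtm, mtm_total_raw - (systematic_mtm_raw + idiosyncratic_mtm_raw))
summary(decomp_gap)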

# ============================================================================
# STEP 6: Run Goldstein-Style Regressions
# ============================================================================

cat("\n=== GOLDSTEIN (2005) STRATEGIC COMPLEMENTARITIES TEST ===\n")

=== GOLDSTEIN (2005) STRATEGIC COMPLEMENTARITIES TEST ===

cat("Prediction: β(Systematic × Uninsured) > β(Idiosyncratic × Uninsured)\n")

Prediction: β(Systematic × Uninsured) > β(Idiosyncratic × Uninsured)

cat("Rationale: Systematic shocks amplified more by funding fragility\n\n")

Rationale: Systematic shocks amplified more by funding fragility

# Clean sample: BTFP users vs pure non-users with valid decomposition
df_sample_mtm <- df_sys_mtm %>%
  filter(btfp_acute == 1 | non_user == 1) %>%
  filter(!is.na(systematic_mtm), !is.na(idiosyncratic_mtm))

cat("Sample size:", nrow(df_sample_mtm), "\n")

Sample size: 3708

cat("BTFP users:", sum(df_sample_mtm$btfp_acute), "\n")

BTFP users: 462

cat("Pure non-users:", sum(df_sample_mtm$non_user), "\n")

Pure non-users: 3246

# Model 1: Main effects only
f_main <- as.formula(paste(
  "btfp_acute ~ systematic_mtm + idiosyncratic_mtm + uninsured_lev +", CONTROLS
))

# Model 2: With interactions (Goldstein test)
f_interact <- as.formula(paste(
  "btfp_acute ~ systematic_mtm + idiosyncratic_mtm + uninsured_lev +",
  "systematic_x_uninsured + idiosyncratic_x_uninsured +", CONTROLS
))

# Model 3: Only systematic interaction
f_sys_only <- as.formula(paste(
  "btfp_acute ~ systematic_mtm + idiosyncratic_mtm + uninsured_lev +",
  "systematic_x_uninsured +", CONTROLS
))

# Model 4: Only idiosyncratic interaction
f_idio_only <- as.formula(paste(
  "btfp_acute ~ systematic_mtm + idiosyncratic_mtm + uninsured_lev +",
  "idiosyncratic_x_uninsured +", CONTROLS
))

# Estimate models
m_main <- feols(f_main, data = df_sample_mtm, vcov = "hetero")
m_interact <- feols(f_interact, data = df_sample_mtm, vcov = "hetero")
m_sys_only <- feols(f_sys_only, data = df_sample_mtm, vcov = "hetero")
m_idio_only <- feols(f_idio_only, data = df_sample_mtm, vcov = "hetero")

# Display results
models_mtm_beta <- list(
  "(1) Main Effects" = m_main,
  "(2) Both Interactions" = m_interact,
  "(3) Systematic Only" = m_sys_only,
  "(4) Idiosyncratic Only" = m_idio_only
)

# Coefficient map for this table
COEF_MAP_MTM <- c(
  "systematic_mtm" = "Systematic MTM (z)",
  "idiosyncratic_mtm" = "Idiosyncratic MTM (z)",
  "uninsured_lev" = "Uninsured Leverage (z)",
  "systematic_x_uninsured" = "Systematic × Uninsured",
  "idiosyncratic_x_uninsured" = "Idiosyncratic × Uninsured",
  "ln_assets" = "Log(Assets) (z)",
  "cash_ratio" = "Cash Ratio (z)",
  "loan_to_deposit" = "Loan/Deposit (z)",
  "book_equity_ratio" = "Book Equity (z)",
  "wholesale" = "Wholesale (z)",
  "roa" = "ROA (z)"
)

n_rows_mtm <- data.frame(
  term = c("N (BTFP=1)", "N (Sample)"),
  `(1) Main Effects` = c(sum(df_sample_mtm$btfp_acute), nrow(df_sample_mtm)),
  `(2) Both Interactions` = c(sum(df_sample_mtm$btfp_acute), nrow(df_sample_mtm)),
  `(3) Systematic Only` = c(sum(df_sample_mtm$btfp_acute), nrow(df_sample_mtm)),
  `(4) Idiosyncratic Only` = c(sum(df_sample_mtm$btfp_acute), nrow(df_sample_mtm)),
  check.names = FALSE
)

modelsummary(
  models_mtm_beta,
  stars = c('*' = 0.1, '**' = 0.05, '***' = 0.01),
  coef_map = COEF_MAP_MTM,
  gof_map = gof_lpm,
  add_rows = n_rows_mtm,
  title = "Table: Strategic Complementarities Test - True MTM Beta Decomposition",
  output = "kableExtra"
) %>% 
  kable_styling(bootstrap_options = c("striped", "hover"), font_size = 9)
Table: Strategic Complementarities Test - True MTM Beta Decomposition
&nbsp;(1) Main Effects &nbsp;(2) Both Interactions &nbsp;(3) Systematic Only &nbsp;(4) Idiosyncratic Only
Systematic MTM (z) 0.040*** 0.042*** 0.040*** 0.042***
(0.007) (0.007) (0.007) (0.007)
Idiosyncratic MTM (z) 0.019*** 0.024*** 0.019*** 0.023***
(0.006) (0.007) (0.006) (0.007)
Uninsured Leverage (z) 0.013** 0.018*** 0.013** 0.017***
(0.006) (0.007) (0.006) (0.007)
Systematic × Uninsured 0.008 −0.001
(0.006) (0.006)
Idiosyncratic × Uninsured 0.020*** 0.016***
(0.006) (0.005)
Log(Assets) (z) 0.069*** 0.069*** 0.069*** 0.069***
(0.007) (0.007) (0.007) (0.007)
Cash Ratio (z) −0.022*** −0.020*** −0.022*** −0.021***
(0.005) (0.005) (0.005) (0.005)
Loan/Deposit (z) −0.022*** −0.018*** −0.022*** −0.019***
(0.006) (0.006) (0.006) (0.006)
Book Equity (z) −0.012*** −0.010** −0.012*** −0.010**
(0.004) (0.004) (0.004) (0.004)
Wholesale (z) 0.027*** 0.027*** 0.027*** 0.027***
(0.006) (0.006) (0.006) (0.006)
ROA (z) −0.004 −0.006 −0.004 −0.006
(0.005) (0.005) (0.005) (0.005)
Num.Obs. 3708 3708 3708 3708
R2 0.095 0.098 0.095 0.097
R2 Adj. 0.093 0.095 0.092 0.095
N (BTFP=1) 462.000 462.000 462.000 462.000
N (Sample) 3708.000 3708.000 3708.000 3708.000
* p < 0.1, ** p < 0.05, *** p < 0.01
# Save table
save_reg_table(
  models_mtm_beta, 
  "Table_Goldstein_MTM_Beta_Decomposition",
  title_text = "Strategic Complementarities: Interest Rate Sensitivity Decomposition",
  notes_text = paste0(
    "This table reports Linear Probability Model (LPM) estimates decomposing MTM losses based on ",
    "bank-specific interest rate sensitivity. The dependent variable equals one if the bank accessed ",
    "BTFP during the acute crisis period (March 13--May 1, 2023). Systematic MTM = $\\hat{\\beta}_i \\times \\Delta$(Fed Funds), ",
    "where $\\hat{\\beta}_i$ is estimated from bank-level time-series regression: MTM\\_loss$_{it}$ = $\\alpha_i$ + $\\beta_i \\times \\Delta$(Fed Funds)$_t$ + $\\varepsilon_{it}$ ",
    "using quarterly data from 2021Q4--2023Q4. Idiosyncratic MTM = Total MTM $-$ Systematic MTM. ",
    "Following Goldstein \\& Pauzner (2005), strategic complementarities predict Systematic $\\times$ Uninsured $>$ ",
    "Idiosyncratic $\\times$ Uninsured (systematic shocks are amplified more by funding fragility). ",
    "The sample consists of BTFP users versus pure non-users with valid beta estimates, excluding G-SIBs and failed institutions. ",
    "Bank characteristics are from 2022Q4 Call Reports. All continuous variables are winsorized at 2.5/97.5 percentiles and z-standardized. ",
    "Heteroskedasticity-robust standard errors in parentheses."
  ),
  coef_map_use = COEF_MAP_MTM, 
  gof_map_use = gof_lpm, 
  add_rows_use = n_rows_mtm
)

Saved: Table_Goldstein_MTM_Beta_Decomposition (HTML + LaTeX)

# ============================================================================
# STEP 7: Formal Hypothesis Test (β_systematic × uninsured > β_idiosyncratic × uninsured)
# ============================================================================

cat("\n", paste(rep("=", 80), collapse = ""), "\n")

================================================================================

cat("GOLDSTEIN HYPOTHESIS TEST: β₄ > β₅?\n")

GOLDSTEIN HYPOTHESIS TEST: β₄ > β₅?

cat(paste(rep("=", 80), collapse = ""), "\n")

================================================================================

# Extract coefficients
beta_sys <- coef(m_interact)["systematic_x_uninsured"]
beta_idio <- coef(m_interact)["idiosyncratic_x_uninsured"]
se_sys <- sqrt(vcov(m_interact)["systematic_x_uninsured", "systematic_x_uninsured"])
se_idio <- sqrt(vcov(m_interact)["idiosyncratic_x_uninsured", "idiosyncratic_x_uninsured"])

cat("\nCoefficient Estimates:\n")

Coefficient Estimates:

cat("  β(Systematic × Uninsured):", round(beta_sys, 4), "(SE:", round(se_sys, 4), ")\n")

β(Systematic × Uninsured): 0.0078 (SE: 0.0062 )

cat("  β(Idiosyncratic × Uninsured):", round(beta_idio, 4), "(SE:", round(se_idio, 4), ")\n")

β(Idiosyncratic × Uninsured): 0.0196 (SE: 0.0056 )

cat("  Difference (β_sys - β_idio):", round(beta_sys - beta_idio, 4), "\n")

Difference (β_sys - β_idio): -0.0117

# Wald test: H0: β_sys = β_idio
wald_test <- wald(m_interact, "systematic_x_uninsured - idiosyncratic_x_uninsured = 0")
cat("\nWald Test (H0: β_sys = β_idio):\n")

Wald Test (H0: β_sys = β_idio):

print(wald_test)

[1] NA
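# NOTE: fixest::wald() tests joint nullity of coefficients selected by name pattern
# and does not parse linear-restriction strings, which is likely why it returns NA
# here; car::linearHypothesis() below gives the intended test. As a cross-check, a
# minimal delta-method sketch of the same one-restriction Wald test (V_int, se_diff,
# and z_diff are illustrative names):
V_int <- vcov(m_interact)
se_diff <- sqrt(V_int["systematic_x_uninsured", "systematic_x_uninsured"] +
                V_int["idiosyncratic_x_uninsured", "idiosyncratic_x_uninsured"] -
                2 * V_int["systematic_x_uninsured", "idiosyncratic_x_uninsured"])
z_diff <- (beta_sys - beta_idio) / se_diff
cat("Difference z-stat:", round(z_diff, 2),
    "; two-sided p:", round(2 * pnorm(-abs(z_diff)), 3), "\n")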

# Also run linearHypothesis for alternative output
cat("\nCar::linearHypothesis Test:\n")

Car::linearHypothesis Test:

lh_test <- car::linearHypothesis(m_interact, "systematic_x_uninsured = idiosyncratic_x_uninsured")
print(lh_test)

Linear hypothesis test

Hypothesis: systematic_x_uninsured - idiosyncratic_x_uninsured = 0

Model 1: restricted model
Model 2: btfp_acute ~ systematic_mtm + idiosyncratic_mtm + uninsured_lev + systematic_x_uninsured + idiosyncratic_x_uninsured + ln_assets + cash_ratio + loan_to_deposit + book_equity_ratio + wholesale + roa

  Res.Df Df Chisq Pr(>Chisq)
1   3697
2   3696  1   3.5      0.061 .
---
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

# Interpretation
cat("\n=== INTERPRETATION ===\n")

=== INTERPRETATION ===

if (beta_sys > beta_idio) {
  cat("✓ β(Systematic × Uninsured) > β(Idiosyncratic × Uninsured)\n")
  cat("  SUPPORTS Goldstein (2005): Systematic shocks amplified more by funding fragility.\n")
  if (wald_test$p < 0.10) {
    cat("  Difference is STATISTICALLY SIGNIFICANT at 10% level.\n")
  } else {
    cat("  However, difference is NOT statistically significant.\n")
  }
} else {
  cat("✗ β(Systematic × Uninsured) < β(Idiosyncratic × Uninsured)\n")
  cat("  DOES NOT SUPPORT simple Goldstein prediction.\n")
  cat("  Interpretation: Idiosyncratic shocks may matter more, possibly because:\n")
  cat("    - Bank-specific factors (e.g., concentrated portfolios) dominate\n")
  cat("    - Or depositors respond more to bank-specific news than market-wide news\n")
}

✗ β(Systematic × Uninsured) < β(Idiosyncratic × Uninsured)
  DOES NOT SUPPORT simple Goldstein prediction.
  Interpretation: Idiosyncratic shocks may matter more, possibly because:
    - Bank-specific factors (e.g., concentrated portfolios) dominate
    - Or depositors respond more to bank-specific news than market-wide news

9 TEST 4: BTFP ELIGIBLE COLLATERAL ANALYSIS

Rationale: If BTFP borrowing were mechanically driven by collateral, the MTM_BTFP × Uninsured interaction should be stable over time (collateral eligibility is persistent). If instead it reflects run dynamics, the interaction should be significant only during the acute period.

# ==============================================================================
# TEST 4: BTFP ELIGIBLE COLLATERAL - TEMPORAL COMPARISON
# ==============================================================================

cat("\n", paste(rep("=", 80), collapse = ""), "\n")

================================================================================

cat("TEST 4: BTFP COLLATERAL MECHANICS VS RUN DYNAMICS\n")

TEST 4: BTFP COLLATERAL MECHANICS VS RUN DYNAMICS

cat(paste(rep("=", 80), collapse = ""), "\n")

================================================================================

# Create samples for acute and arbitrage periods
df_btfp_acute_s <- df_acute %>%
  filter(btfp_acute == 1 | non_user == 1)

df_btfp_arb_s <- df_arb %>%
  filter(btfp_arb == 1 | non_user == 1)

# Formulas for BTFP-specific collateral analysis
f_coll1 <- as.formula(paste("btfp_acute ~ mtm_btfp + uninsured_lev +", CONTROLS))
f_coll2 <- as.formula(paste("btfp_acute ~ mtm_btfp + uninsured_lev + mtm_btfp_x_uninsured +", CONTROLS))
f_coll3 <- as.formula(paste("btfp_acute ~ mtm_btfp + mtm_other + uninsured_lev + mtm_btfp_x_uninsured +", CONTROLS))

# Acute period models
m_coll_acute <- list(
  "(1) MTM_OMO Only" = feols(f_coll1, data = df_btfp_acute_s, vcov = "hetero"),
  "(2) +Interaction" = feols(f_coll2, data = df_btfp_acute_s, vcov = "hetero"),
  "(3) +MTM_Other" = feols(f_coll3, data = df_btfp_acute_s, vcov = "hetero")
)

# Arbitrage period models (need to update DV)
f_arb1 <- as.formula(paste("btfp_arb ~ mtm_btfp + uninsured_lev +", CONTROLS))
f_arb2 <- as.formula(paste("btfp_arb ~ mtm_btfp + uninsured_lev + mtm_btfp_x_uninsured +", CONTROLS))
f_arb3 <- as.formula(paste("btfp_arb ~ mtm_btfp + mtm_other + uninsured_lev + mtm_btfp_x_uninsured +", CONTROLS))

m_coll_arb <- list(
  "(4) Arb: MTM_OMO" = feols(f_arb1, data = df_btfp_arb_s, vcov = "hetero"),
  "(5) Arb: +Interaction" = feols(f_arb2, data = df_btfp_arb_s, vcov = "hetero"),
  "(6) Arb: +MTM_Other" = feols(f_arb3, data = df_btfp_arb_s, vcov = "hetero")
)

# Combine all models
models_collateral <- c(m_coll_acute, m_coll_arb)

n_collateral <- data.frame(
  term = c("N (BTFP=1)", "N (Sample)", "Period"),
  `(1) MTM_OMO Only` = c(sum(df_btfp_acute_s$btfp_acute), nrow(df_btfp_acute_s), "Acute"),
  `(2) +Interaction` = c(sum(df_btfp_acute_s$btfp_acute), nrow(df_btfp_acute_s), "Acute"),
  `(3) +MTM_Other` = c(sum(df_btfp_acute_s$btfp_acute), nrow(df_btfp_acute_s), "Acute"),
  `(4) Arb: MTM_OMO` = c(sum(df_btfp_arb_s$btfp_arb), nrow(df_btfp_arb_s), "Arbitrage"),
  `(5) Arb: +Interaction` = c(sum(df_btfp_arb_s$btfp_arb), nrow(df_btfp_arb_s), "Arbitrage"),
  `(6) Arb: +MTM_Other` = c(sum(df_btfp_arb_s$btfp_arb), nrow(df_btfp_arb_s), "Arbitrage"),
  check.names = FALSE
)

modelsummary(
  models_collateral,
  stars = c('*' = 0.1, '**' = 0.05, '***' = 0.01),
  coef_map = COEF_MAP, gof_map = gof_lpm, add_rows = n_collateral,
  title = "Table: BTFP Collateral Mechanics vs Run Dynamics",
  output = "kableExtra"
) %>% 
  kable_styling(bootstrap_options = c("striped", "hover"), font_size = 8) %>%
  add_header_above(c(" " = 1, "Acute Period" = 3, "Arbitrage Period" = 3))
Table: BTFP Collateral Mechanics vs Run Dynamics
Acute Period
Arbitrage Period
&nbsp;(1) MTM_OMO Only &nbsp;(2) +Interaction &nbsp;(3) +MTM_Other &nbsp;(4) Arb: MTM_OMO &nbsp;(5) Arb: +Interaction &nbsp;(6) Arb: +MTM_Other
MTM Loss BTFP (z) 0.016** 0.016** 0.017*** 0.007 0.007 0.009
(0.006) (0.006) (0.006) (0.007) (0.007) (0.007)
Uninsured Lev (z) 0.013** 0.014** 0.015** 0.010 0.010 0.011*
(0.006) (0.006) (0.006) (0.006) (0.006) (0.006)
MTM_OMO × Uninsured 0.011** 0.011** −0.005 −0.005
(0.005) (0.005) (0.005) (0.005)
Log(Assets) (z) 0.067*** 0.066*** 0.065*** 0.054*** 0.054*** 0.053***
(0.007) (0.007) (0.007) (0.007) (0.007) (0.007)
Cash Ratio (z) −0.031*** −0.031*** −0.028*** −0.043*** −0.043*** −0.039***
(0.004) (0.004) (0.005) (0.005) (0.005) (0.006)
Loan/Deposit (z) −0.009 −0.009 −0.008 −0.007 −0.007 −0.006
(0.006) (0.006) (0.006) (0.007) (0.007) (0.007)
Book Equity (z) −0.015*** −0.014*** −0.013*** −0.010* −0.010* −0.009
(0.004) (0.004) (0.004) (0.006) (0.006) (0.006)
Wholesale (z) 0.024*** 0.024*** 0.024*** 0.100*** 0.100*** 0.101***
(0.006) (0.006) (0.006) (0.008) (0.008) (0.008)
ROA (z) −0.003 −0.004 −0.003 −0.027*** −0.027*** −0.024***
(0.005) (0.005) (0.005) (0.006) (0.006) (0.006)
Num.Obs. 3737 3737 3737 4038 4038 4038
R2 0.086 0.087 0.088 0.140 0.140 0.141
R2 Adj. 0.084 0.085 0.085 0.139 0.138 0.139
N (BTFP=1) 462 462 462 766 766 766
N (Sample) 3747 3747 3747 4055 4055 4055
Period Acute Acute Acute Arbitrage Arbitrage Arbitrage
* p < 0.1, ** p < 0.05, *** p < 0.01
# Save
save_reg_table(models_collateral, "Table_BTFP_Collateral_Temporal",
               title_text = "BTFP Borrowing: Collateral Mechanics vs.\ Run Dynamics",
               notes_text = "This table reports Linear Probability Model (LPM) estimates comparing BTFP borrowing determinants across time periods. The dependent variable equals one if the bank accessed BTFP. Columns (1)--(3) use the acute crisis period (March 13--May 1, 2023) with 2022Q4 baseline characteristics. Columns (4)--(6) use the arbitrage period (November 2023--January 2024) with 2023Q3 baseline characteristics. MTM\\_OMO measures mark-to-market losses on OMO-eligible securities only. MTM\\_Other measures losses on non-OMO securities. If borrowing is mechanically driven by collateral availability, the MTM\\_OMO $\\times$ Uninsured interaction should remain stable across periods. If borrowing reflects run dynamics, the interaction should be significant only during the acute period. The sample consists of BTFP users versus pure non-users, excluding G-SIBs and failed institutions. All continuous variables are winsorized at 2.5/97.5 percentiles and z-standardized. Heteroskedasticity-robust standard errors in parentheses.",
               coef_map_use = COEF_MAP, gof_map_use = gof_lpm, add_rows_use = n_collateral)

Saved: Table_BTFP_Collateral_Temporal (HTML + LaTeX)

# Extract interaction coefficients for comparison
cat("\n=== KEY COMPARISON: MTM_OMO × Uninsured Interaction ===\n")

=== KEY COMPARISON: MTM_OMO × Uninsured Interaction ===

cat("Acute Period (Model 2):", round(coef(m_coll_acute[[2]])["mtm_btfp_x_uninsured"], 4), "\n")

Acute Period (Model 2): 0.0106

cat("Arbitrage Period (Model 5):", round(coef(m_coll_arb[[2]])["mtm_btfp_x_uninsured"], 4), "\n")

Arbitrage Period (Model 5): -0.0046

cat("\nInterpretation: If acute >> arbitrage, supports run dynamics over mechanical selection.\n")

Interpretation: If acute >> arbitrage, supports run dynamics over mechanical selection.
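A quick way to formalize this comparison is a two-sample z-test on the two interaction coefficients (a sketch that treats the acute and arbitrage cross-sections as independent even though the underlying banks largely overlap; b_acute, b_arb, and z_period are illustrative names):

b_acute  <- coef(m_coll_acute[[2]])["mtm_btfp_x_uninsured"]
b_arb    <- coef(m_coll_arb[[2]])["mtm_btfp_x_uninsured"]
se_acute <- sqrt(vcov(m_coll_acute[[2]])["mtm_btfp_x_uninsured", "mtm_btfp_x_uninsured"])
se_arb   <- sqrt(vcov(m_coll_arb[[2]])["mtm_btfp_x_uninsured", "mtm_btfp_x_uninsured"])
z_period <- (b_acute - b_arb) / sqrt(se_acute^2 + se_arb^2)
cat("Acute - Arbitrage difference:", round(b_acute - b_arb, 4),
    "; z =", round(z_period, 2),
    "; two-sided p =", round(2 * pnorm(-abs(z_period)), 3), "\n")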

10 TEST 5: RISK 3 BANKS - DW VS BTFP COMPARISON

Rationale: Risk 3 banks (high MTM losses but low uninsured deposits) hold ample loss-bearing collateral yet face little run risk. If BTFP take-up reflects run risk, these banks should rely on the DW rather than BTFP; if take-up were purely collateral-driven, they would instead prefer BTFP.

# ==============================================================================
# TEST 5: RISK 3 BANKS BEHAVIOR
# ==============================================================================

cat("\n", paste(rep("=", 80), collapse = ""), "\n")

================================================================================

cat("TEST 5: RISK 3 BANKS - DW VS BTFP COMPARISON\n")

TEST 5: RISK 3 BANKS - DW VS BTFP COMPARISON

cat(paste(rep("=", 80), collapse = ""), "\n")

================================================================================

# Risk 3 summary
risk3_summary <- df_acute %>%
  filter(run_risk_3 == 1) %>%
  summarise(
    n_risk3 = n(),
    n_btfp = sum(btfp_acute),
    n_dw = sum(dw_acute),
    n_both = sum(btfp_acute == 1 & dw_acute == 1),
    n_btfp_only = sum(btfp_acute == 1 & dw_acute == 0),
    n_dw_only = sum(btfp_acute == 0 & dw_acute == 1),
    n_neither = sum(btfp_acute == 0 & dw_acute == 0),
    pct_btfp = mean(btfp_acute) * 100,
    pct_dw = mean(dw_acute) * 100,
    mean_mtm = mean(mtm_total_w, na.rm = TRUE),
    mean_uninsured = mean(uninsured_lev_w, na.rm = TRUE)
  )

# Compare with other risk categories
risk_facility_comparison <- df_acute %>%
  group_by(run_risk_cat) %>%
  summarise(
    N = n(),
    BTFP_Users = sum(btfp_acute),
    DW_Users = sum(dw_acute),
    Pct_BTFP = mean(btfp_acute) * 100,
    Pct_DW = mean(dw_acute) * 100,
    Ratio_BTFP_to_DW = sum(btfp_acute) / max(sum(dw_acute), 1),
    Mean_MTM = mean(mtm_total_w, na.rm = TRUE),
    Mean_Uninsured = mean(uninsured_lev_w, na.rm = TRUE),
    .groups = "drop"
  ) %>%
  arrange(run_risk_cat)

cat("\n=== FACILITY USAGE BY RISK CATEGORY ===\n")

=== FACILITY USAGE BY RISK CATEGORY ===

print(risk_facility_comparison)

# A tibble: 5 × 9
  run_risk_cat     N BTFP_Users DW_Users Pct_BTFP Pct_DW Ratio_BTFP_to_DW
1 Risk 1        1006         54       44     5.37   4.37            1.23
2 Risk 2        1135        113      118     9.96  10.4             0.958
3 Risk 3        1136        111       85     9.77   7.48            1.31
4 Risk 4        1005        184      146    18.3   14.5             1.26
5 <NA>            10          0        0     0      0               0
# ℹ 2 more variables: Mean_MTM <dbl>, Mean_Uninsured <dbl>

# Create nice table
risk_facility_comparison %>%
  kable(format = "html",
        digits = 2,
        col.names = c("Risk Category", "N Banks", "BTFP Users", "DW Users",
                      "% BTFP", "% DW", "BTFP/DW Ratio", "Mean MTM", "Mean Uninsured"),
        caption = "Emergency Facility Usage by Risk Category (Acute Period)") %>%
  kable_styling(bootstrap_options = c("striped", "hover"), font_size = 10) %>%
  row_spec(3, background = "#FFFF99")  # Highlight Risk 3
Emergency Facility Usage by Risk Category (Acute Period)
Risk Category N Banks BTFP Users DW Users % BTFP % DW BTFP/DW Ratio Mean MTM Mean Uninsured
Risk 1 1006 54 44 5.37 4.37 1.23 3.72 14.30
Risk 2 1135 113 118 9.96 10.40 0.96 3.73 33.32
Risk 3 1136 111 85 9.77 7.48 1.31 7.37 14.66
Risk 4 1005 184 146 18.31 14.53 1.26 7.01 31.41
NA 10 0 0 0.00 0.00 0.00 NaN 28.74
# LaTeX version with consistent formatting
save_simple_latex_table(
  risk_facility_comparison,
  "Table_Risk3_Facility_Comparison",
  title_text = "Emergency Facility Usage by Run Risk Category",
  notes_text = "This table reports emergency facility usage rates by run risk category during the acute crisis period (March 13--May 1, 2023). Risk categories are constructed from median splits of MTM losses (percentage of total assets) and uninsured deposit leverage (uninsured deposits to total assets). Risk 1 = Low MTM, Low Uninsured (reference category). Risk 2 = Low MTM, High Uninsured (funding fragility without asset losses). Risk 3 = High MTM, Low Uninsured (asset losses without funding fragility). Risk 4 = High MTM, High Uninsured (both asset losses and funding fragility). BTFP/DW Ratio compares BTFP usage to DW usage within each category. The sample excludes G-SIBs and failed institutions (SVB, Signature, First Republic, Silvergate). Bank characteristics are from 2022Q4 Call Reports.",
  col_names = c("Risk Category", "N Banks", "BTFP Users", "DW Users",
                "\\% BTFP", "\\% DW", "BTFP/DW Ratio", "Mean MTM", "Mean Uninsured"),
  digits = 2
)

Saved: Table_Risk3_Facility_Comparison .tex

# Chi-square test for Risk 3 vs Risk 4 facility preference
cat("\n=== STATISTICAL TEST: RISK 3 VS RISK 4 FACILITY PREFERENCE ===\n")

=== STATISTICAL TEST: RISK 3 VS RISK 4 FACILITY PREFERENCE ===

risk34_data <- df_acute %>%
  filter(run_risk_3 == 1 | run_risk_4 == 1) %>%
  filter(btfp_acute == 1 | dw_acute == 1)  # Among facility users only

if (nrow(risk34_data) > 10) {
  # Create contingency table
  facility_table <- table(
    Risk = ifelse(risk34_data$run_risk_3 == 1, "Risk 3", "Risk 4"),
    Facility = case_when(
      risk34_data$btfp_acute == 1 & risk34_data$dw_acute == 0 ~ "BTFP Only",
      risk34_data$btfp_acute == 0 & risk34_data$dw_acute == 1 ~ "DW Only",
      TRUE ~ "Both"
    )
  )
  
  cat("\nContingency Table (Among Facility Users):\n")
  print(facility_table)
  
  # Chi-square test
  chi_test <- chisq.test(facility_table)
  cat("\nChi-square test p-value:", round(chi_test$p.value, 4), "\n")
  cat("Interpretation: Significant if Risk 3 banks disproportionately choose DW over BTFP.\n")
}

Contingency Table (Among Facility Users):
        Facility
Risk     Both BTFP Only DW Only
  Risk 3   12        99      73
  Risk 4   46       138     100

Chi-square test p-value: 0.0081
Interpretation: Significant if Risk 3 banks disproportionately choose DW over BTFP.
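# Follow-up sketch (dw_only and risk_grp are illustrative names): a two-sample test
# of the "DW only" share among facility users in Risk 3 vs Risk 4, a more targeted
# contrast than the omnibus chi-square above.
dw_only  <- risk34_data$btfp_acute == 0 & risk34_data$dw_acute == 1
risk_grp <- ifelse(risk34_data$run_risk_3 == 1, "Risk 3", "Risk 4")
prop.test(table(risk_grp, dw_only)[, c("TRUE", "FALSE")])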

# ==============================================================================
# VISUALIZATION: RISK CATEGORY FACILITY PREFERENCES
# ==============================================================================

# Prepare data for plot
plot_data_risk <- df_acute %>%
  group_by(run_risk_cat) %>%
  summarise(
    BTFP = mean(btfp_acute) * 100,
    DW = mean(dw_acute) * 100,
    .groups = "drop"
  ) %>%
  pivot_longer(cols = c(BTFP, DW), names_to = "Facility", values_to = "Usage_Rate")

p_risk <- ggplot(plot_data_risk, aes(x = run_risk_cat, y = Usage_Rate, fill = Facility)) +
  geom_bar(stat = "identity", position = position_dodge(width = 0.8), width = 0.7) +
  geom_text(aes(label = sprintf("%.1f%%", Usage_Rate)),
            position = position_dodge(width = 0.8), vjust = -0.5, size = 3) +
  scale_fill_manual(values = c("BTFP" = "#2166AC", "DW" = "#B2182B")) +
  labs(
    title = "Emergency Facility Usage by Risk Category",
    subtitle = "Risk 3 (High MTM, Low Uninsured) should use DW more than BTFP if BTFP screens for run risk",
    x = "Risk Category",
    y = "Usage Rate (%)",
    caption = "Note: Risk 3 highlighted. If BTFP were purely collateral-driven, Risk 3 would prefer BTFP."
  ) +
  theme_minimal(base_size = 12) +
  theme(legend.position = "bottom")

print(p_risk)

save_figure(p_risk, "Figure_Risk_Category_Facility_Usage", width = 10, height = 6)
## Saved: Figure_Risk_Category_Facility_Usage (PDF + PNG)

11.1 Test 4: Level Test by MTM Decile

# ==============================================================================
# TEST 4: LEVEL TEST BY MTM DECILE
# ==============================================================================

cat("\n", strrep("=", 80), "\n")
## 
##  ================================================================================
cat("TEST 4: LEVEL TEST BY MTM DECILE\n")
## TEST 4: LEVEL TEST BY MTM DECILE
cat(strrep("=", 80), "\n")
## ================================================================================
df_btfp_clean <- df_acute %>%
  mutate(mtm_decile = ntile(mtm_total_w, 10))

decile_results <- df_btfp_clean %>%
  group_by(mtm_decile) %>%
  group_modify(~ {
    if (nrow(.x) < 30) return(tibble(beta = NA_real_, se = NA_real_, n = nrow(.x)))
    mod <- tryCatch({
      feols(btfp_acute ~ uninsured_lev, data = .x, vcov = "hetero")
    }, error = function(e) NULL)
    if (is.null(mod)) return(tibble(beta = NA_real_, se = NA_real_, n = nrow(.x)))
    tibble(
      beta = coef(mod)["uninsured_lev"],
      se = sqrt(vcov(mod)["uninsured_lev", "uninsured_lev"]),
      n = nrow(.x)
    )
  }) %>%
  ungroup() %>%
  mutate(conf_low = beta - 1.96 * se, conf_high = beta + 1.96 * se)

p_decile <- ggplot(decile_results, aes(x = factor(mtm_decile), y = beta)) +
  geom_col(fill = "#4575B4", alpha = 0.7, width = 0.7) +
  geom_errorbar(aes(ymin = conf_low, ymax = conf_high), width = 0.3) +
  geom_hline(yintercept = 0, linetype = "dashed") +
  geom_vline(xintercept = 5.5, linetype = "dotted", color = "red", linewidth = 1) +
  annotate("text", x = 5.5, y = max(decile_results$conf_high, na.rm = TRUE) * 0.9,
           label = "Median", color = "red", hjust = -0.1) +
  labs(
    title = "Effect of Uninsured Leverage on BTFP Use by MTM Loss Decile",
    subtitle = "Level test: Effect should be larger in high-MTM deciles (panic region)",
    x = "MTM Loss Decile (1 = Lowest, 10 = Highest)",
    y = "Coefficient on Uninsured Leverage"
  ) +
  theme_minimal(base_size = 14) +
  theme(plot.title = element_text(face = "bold"))

print(p_decile)

save_figure(p_decile, "Figure_Level_Test_Deciles", width = 12, height = 6)
## Saved: Figure_Level_Test_Deciles (PDF + PNG)
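To complement the decile plot with a single summary statistic, one can interact uninsured leverage with an above-median MTM indicator in the same sample (a sketch; df_level, high_mtm, and m_level are illustrative names):

df_level <- df_btfp_clean %>%
  mutate(high_mtm = as.integer(mtm_decile > 5))
m_level <- feols(btfp_acute ~ uninsured_lev * high_mtm, data = df_level, vcov = "hetero")
summary(m_level)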

11.2 Test 5: Risk Category Analysis

# ==============================================================================
# TEST 5: FACILITY USE BY RISK CATEGORY
# ==============================================================================

cat("\n", strrep("=", 80), "\n")
## 
##  ================================================================================
cat("TEST 5: FACILITY USE BY RISK CATEGORY\n")
## TEST 5: FACILITY USE BY RISK CATEGORY
cat(strrep("=", 80), "\n")
## ================================================================================
risk_summary <- df_acute %>%
  mutate(
    risk_cat = case_when(
      run_risk_1 == 1 ~ "R1: Low MTM, Low Unins",
      run_risk_2 == 1 ~ "R2: Low MTM, High Unins",
      run_risk_3 == 1 ~ "R3: High MTM, Low Unins",
      run_risk_4 == 1 ~ "R4: High MTM, High Unins"
    )
  ) %>%
  group_by(risk_cat) %>%
  summarise(
    n_banks = n(),
    pct_btfp = 100 * mean(btfp_acute),
    pct_dw = 100 * mean(dw_acute),
    pct_any = 100 * mean(any_fed),
    avg_mtm = mean(mtm_total_w, na.rm = TRUE),
    avg_uninsured = mean(uninsured_lev_w, na.rm = TRUE),
    .groups = "drop"
  )

print(risk_summary)
## # A tibble: 5 × 7
##   risk_cat                 n_banks pct_btfp pct_dw pct_any avg_mtm avg_uninsured
##   <chr>                      <int>    <dbl>  <dbl>   <dbl>   <dbl>         <dbl>
## 1 R1: Low MTM, Low Unins      1006     5.37   4.37    8.95    3.72          14.3
## 2 R2: Low MTM, High Unins     1135     9.96  10.4    17.9     3.73          33.3
## 3 R3: High MTM, Low Unins     1136     9.77   7.48   16.2     7.37          14.7
## 4 R4: High MTM, High Unins    1005    18.3   14.5    28.3     7.01          31.4
## 5 <NA>                          10     0      0       0     NaN             28.7
p_risk <- risk_summary %>%
  pivot_longer(cols = c(pct_btfp, pct_dw), names_to = "facility", values_to = "pct") %>%
  mutate(facility = ifelse(facility == "pct_btfp", "BTFP", "DW")) %>%
  ggplot(aes(x = risk_cat, y = pct, fill = facility)) +
  geom_col(position = "dodge", alpha = 0.8) +
  scale_fill_manual(values = c("BTFP" = "#1B9E77", "DW" = "#D95F02")) +
  labs(
    title = "Emergency Facility Usage by Risk Category",
    x = NULL, y = "Percent of Banks Using Facility", fill = "Facility"
  ) +
  theme_minimal(base_size = 14) +
  theme(axis.text.x = element_text(angle = 15, hjust = 1),
        plot.title = element_text(face = "bold"))

print(p_risk)

save_figure(p_risk, "Figure_Risk_Category_Usage", width = 10, height = 6)
## Saved: Figure_Risk_Category_Usage (PDF + PNG)

11.3 Test 5: Chen et al. (2020) Main Specification

Sample: BTFP borrowers vs. pure non-users (consistent with main analysis)

# ==============================================================================
# TEST 1: CHEN ET AL. MAIN SPECIFICATION
# Sample: BTFP users vs. Pure non-users
# ==============================================================================

cat("\n", strrep("=", 80), "\n")
## 
##  ================================================================================
cat("TEST 1: STRATEGIC COMPLEMENTARITIES (CHEN ET AL. 2020)\n")
## TEST 1: STRATEGIC COMPLEMENTARITIES (CHEN ET AL. 2020)
cat(strrep("=", 80), "\n")
## ================================================================================
# Clean sample: BTFP users OR pure non-users
df_btfp_clean <- df_acute %>%
  filter(btfp_acute == 1 | non_user == 1)

cat("Sample: BTFP users vs. Pure non-users\n")
## Sample: BTFP users vs. Pure non-users
cat("  BTFP users:", sum(df_btfp_clean$btfp_acute), "\n")
##   BTFP users: 462
cat("  Pure non-users:", sum(df_btfp_clean$non_user), "\n")
##   Pure non-users: 3285
cat("  Total:", nrow(df_btfp_clean), "\n\n")
##   Total: 3747
# Models
m1 <- feols(btfp_acute ~ mtm_total, data = df_btfp_clean, vcov = "hetero")

m2 <- feols(btfp_acute ~ mtm_total + uninsured_lev, data = df_btfp_clean, vcov = "hetero")

m3 <- feols(btfp_acute ~ mtm_total * uninsured_lev, data = df_btfp_clean, vcov = "hetero")

m4 <- feols(build_formula("btfp_acute", "mtm_total * uninsured_lev"), 
            data = df_btfp_clean, vcov = "hetero")

models_test1 <- list("(1)" = m1, "(2)" = m2, "(3)" = m3, "(4)" = m4)

n_rows_t1 <- create_n_rows(df_btfp_clean, "btfp_acute", 4)

modelsummary(
  models_test1,
  stars = c('*' = 0.1, '**' = 0.05, '***' = 0.01),
  coef_map = COEF_MAP,
  gof_map = gof_lpm,
  add_rows = n_rows_t1,
  title = "Table 1: Strategic Complementarities Test - BTFP (Acute Period)"
)
Table 1: Strategic Complementarities Test - BTFP (Acute Period)
(1) (2) (3) (4)
* p < 0.1, ** p < 0.05, *** p < 0.01
MTM Loss (z) 0.039*** 0.044*** 0.048*** 0.024***
(0.005) (0.005) (0.005) (0.006)
Uninsured Lev (z) 0.048*** 0.052*** 0.020***
(0.005) (0.006) (0.007)
MTM × Uninsured 0.021*** 0.018***
(0.005) (0.005)
Log(Assets) (z) 0.067***
(0.007)
Cash Ratio (z) -0.024***
(0.005)
Loan/Deposit (z) -0.008
(0.005)
Book Equity (z) -0.011**
(0.004)
Wholesale (z) 0.025***
(0.006)
ROA (z) -0.005
(0.005)
Num.Obs. 3737 3737 3737 3737
R2 0.014 0.035 0.039 0.090
R2 Adj. 0.014 0.034 0.039 0.088
N (btfp_acute=1) 462.000 462.000 462.000 462.000
N (Sample) 3747.000 3747.000 3747.000 3747.000
save_reg_table(models_test1, "Table_StrategicComp_BTFP_Acute",
               title_text = "Strategic Complementarities Test: BTFP Participation (Acute Crisis Period)",
               notes_text = "This table reports Linear Probability Model (LPM) estimates of BTFP participation during the acute crisis period (March 13--May 1, 2023). The dependent variable equals one if the bank accessed BTFP. Column (1) includes only MTM Loss. Column (2) adds Uninsured Leverage. Column (3) adds the interaction term without controls. Column (4) includes the full specification with bank controls. The sample consists of BTFP users versus pure non-users (banks with no BTFP, DW, or abnormal FHLB borrowing), excluding G-SIBs and failed institutions (SVB, Signature, First Republic, Silvergate). Bank characteristics are from 2022Q4 Call Reports. All continuous variables are winsorized at 2.5/97.5 percentiles and z-standardized. Heteroskedasticity-robust standard errors in parentheses.",
               coef_map_use = COEF_MAP, gof_map_use = gof_lpm, add_rows_use = n_rows_t1)
## Saved: Table_StrategicComp_BTFP_Acute (HTML + LaTeX)
cat("\n--- Key Result ---\n")
## 
## --- Key Result ---
cat("MTM × Uninsured coefficient:", round(coef(m4)["mtm_total:uninsured_lev"], 4), "\n")
## MTM × Uninsured coefficient: 0.0181
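To read the interaction in economic terms, the implied slope of uninsured leverage at low versus high MTM losses can be computed from column (3), which contains only the two standardized variables and their product (a minimal sketch; b3 and m_val are illustrative names):

b3 <- coef(m3)
for (m_val in c(-1, 1)) {
  cat("MTM =", m_val, "SD: implied d Pr(BTFP) / d Uninsured =",
      round(b3["uninsured_lev"] + b3["mtm_total:uninsured_lev"] * m_val, 4), "\n")
}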

12 TEST 6: INTENSIVE MARGIN - PAR BENEFIT INSIGNIFICANCE

Rationale: Par Benefit (OMO MTM Loss / OMO Collateral) should be insignificant in predicting borrowing amounts if borrowing is driven by run risk rather than mechanical arbitrage.

# ==============================================================================
# TEST 6: PAR BENEFIT INSIGNIFICANCE TEST
# ==============================================================================

cat("\n", paste(rep("=", 80), collapse = ""), "\n")

================================================================================

cat("TEST 6: INTENSIVE MARGIN - PAR BENEFIT INSIGNIFICANCE\n")

TEST 6: INTENSIVE MARGIN - PAR BENEFIT INSIGNIFICANCE

cat(paste(rep("=", 80), collapse = ""), "\n")

================================================================================

# Create intensive margin dataset (BTFP borrowers only)
df_intensive <- df_acute %>%
  filter(btfp_acute == 1, !is.na(btfp_pct), btfp_pct > 0) %>%
  mutate(
    # Par benefit: MTM loss rate on OMO-eligible securities
    par_benefit_raw = safe_div(mtm_loss_omo_eligible_to_total_asset, omo_eligible_to_total_asset) * 100,
    par_benefit = standardize_z(winsorize(par_benefit_raw)),
    
    # Collateral capacity
    collateral_capacity_raw = omo_eligible_to_total_asset,
    collateral_capacity = standardize_z(winsorize(collateral_capacity_raw)),
    
    # Log borrowing
    log_btfp = log(btfp_pct + 0.01)
  )

cat("\nIntensive Margin Sample:\n")

Intensive Margin Sample:

cat("  N BTFP Borrowers:", nrow(df_intensive), "\n")

N BTFP Borrowers: 462

cat("  Mean Borrowing (% assets):", round(mean(df_intensive$btfp_pct, na.rm = TRUE), 2), "\n")

Mean Borrowing (% assets): 5.99

cat("  Mean Par Benefit (z):", round(mean(df_intensive$par_benefit, na.rm = TRUE), 2), "\n")

Mean Par Benefit (z): 0

# Build intensive margin models
f_int1 <- as.formula("btfp_pct ~ par_benefit + collateral_capacity")
f_int2 <- as.formula(paste("btfp_pct ~ par_benefit + collateral_capacity +", CONTROLS))
f_int3 <- as.formula(paste("btfp_pct ~ par_benefit + collateral_capacity + uninsured_lev +", CONTROLS))
f_int4 <- as.formula(paste("btfp_pct ~ par_benefit + collateral_capacity + uninsured_outflow +", CONTROLS))
f_int5 <- as.formula(paste("btfp_pct ~ par_benefit + collateral_capacity + uninsured_lev + uninsured_outflow +", CONTROLS))

models_intensive <- list(
  "(1) Par + Capacity" = feols(f_int1, data = df_intensive, vcov = "hetero"),
  "(2) +Controls" = feols(f_int2, data = df_intensive, vcov = "hetero"),
  "(3) +Uninsured Lev" = feols(f_int3, data = df_intensive, vcov = "hetero"),
  "(4) +Outflow" = feols(f_int4, data = df_intensive, vcov = "hetero"),
  "(5) Full Model" = feols(f_int5, data = df_intensive, vcov = "hetero")
)

n_intensive <- data.frame(
  term = c("N (Borrowers)"),
  `(1) Par + Capacity` = nrow(df_intensive),
  `(2) +Controls` = nrow(df_intensive),
  `(3) +Uninsured Lev` = nrow(df_intensive),
  `(4) +Outflow` = nrow(df_intensive),
  `(5) Full Model` = nrow(df_intensive),
  check.names = FALSE
)

modelsummary(
  models_intensive,
  stars = c('*' = 0.1, '**' = 0.05, '***' = 0.01),
  coef_map = COEF_MAP, gof_map = gof_lpm, add_rows = n_intensive,
  title = "Table: Intensive Margin - Par Benefit Insignificance Test",
  output = "kableExtra"
) %>% kable_styling(bootstrap_options = c("striped", "hover"), font_size = 9)
Table: Intensive Margin - Par Benefit Insignificance Test
&nbsp;(1) Par + Capacity &nbsp;(2) +Controls &nbsp;(3) +Uninsured Lev &nbsp;(4) +Outflow &nbsp;(5) Full Model
Uninsured Lev (z) −1.667 −1.697
(1.367) (1.369)
Log(Assets) (z) 1.243 2.012 1.232 1.999
(0.918) (1.455) (0.933) (1.457)
Cash Ratio (z) 1.785 2.306 1.838 2.388
(1.470) (1.540) (1.528) (1.590)
Loan/Deposit (z) −2.058 −2.183 −2.046 −2.161
(1.411) (1.450) (1.426) (1.456)
Book Equity (z) 1.270 0.961 1.278 0.984
(1.063) (0.854) (1.052) (0.855)
Wholesale (z) 0.611* 0.489 0.611* 0.492
(0.324) (0.330) (0.324) (0.331)
ROA (z) 0.484 0.704 0.456 0.672
(1.004) (1.139) (1.024) (1.151)
Par Benefit (z) −0.786 −0.601 −0.644 −0.595 −0.645
(0.624) (0.613) (0.632) (0.615) (0.637)
Collateral Capacity (z) 2.251** 1.477** 1.415** 1.477** 1.409**
(1.075) (0.714) (0.651) (0.717) (0.651)
Uninsured Outflow (z) 0.045 −0.122
(0.427) (0.427)
Num.Obs. 462 462 462 461 461
R2 0.061 0.118 0.140 0.118 0.140
R2 Adj. 0.057 0.103 0.122 0.101 0.121
N (Borrowers) 462.000 462.000 462.000 462.000 462.000
* p < 0.1, ** p < 0.05, *** p < 0.01
# Save
save_reg_table(models_intensive, "Table_Intensive_Par_Benefit",
               title_text = "Intensive Margin Analysis: Par Benefit and BTFP Borrowing Amount",
               notes_text = "This table reports OLS estimates of BTFP borrowing intensity among banks that accessed BTFP during the acute crisis period (March 13--May 1, 2023). The dependent variable is BTFP borrowing as a percentage of total assets. Par Benefit measures the implicit subsidy from par-value lending, calculated as MTM loss on OMO-eligible securities divided by OMO-eligible securities (both scaled by total assets). Collateral Capacity measures OMO-eligible securities as a share of total assets. Uninsured Outflow measures the quarterly change in uninsured deposits (2022Q4 to 2023Q1). If borrowing is mechanically driven by arbitrage opportunities, Par Benefit should be significant. If borrowing reflects liquidity needs from run dynamics, Par Benefit should be insignificant. The sample consists of BTFP borrowers only, excluding G-SIBs and failed institutions. Bank characteristics are from 2022Q4 Call Reports. All continuous variables are winsorized at 2.5/97.5 percentiles and z-standardized. Heteroskedasticity-robust standard errors in parentheses.",
               coef_map_use = COEF_MAP, gof_map_use = gof_lpm, add_rows_use = n_intensive)

Saved: Table_Intensive_Par_Benefit (HTML + LaTeX)

# Highlight par_benefit coefficient
cat("\n=== PAR BENEFIT COEFFICIENT ACROSS MODELS ===\n")

=== PAR BENEFIT COEFFICIENT ACROSS MODELS ===

for (i in seq_along(models_intensive)) {
  pb_coef <- coef(models_intensive[[i]])["par_benefit"]
  pb_se <- sqrt(vcov(models_intensive[[i]])["par_benefit", "par_benefit"])
  pb_t <- pb_coef / pb_se
  pb_p <- 2 * pt(-abs(pb_t), df = nrow(df_intensive) - length(coef(models_intensive[[i]])))
  cat(sprintf("Model %d: β = %.4f (SE = %.4f), t = %.2f, p = %.3f\n", i, pb_coef, pb_se, pb_t, pb_p))
}

Model 1: β = -0.7855 (SE = 0.6237), t = -1.26, p = 0.209
Model 2: β = -0.6011 (SE = 0.6131), t = -0.98, p = 0.327
Model 3: β = -0.6444 (SE = 0.6322), t = -1.02, p = 0.309
Model 4: β = -0.5951 (SE = 0.6147), t = -0.97, p = 0.334
Model 5: β = -0.6450 (SE = 0.6371), t = -1.01, p = 0.312

cat("\nConclusion: Par Benefit is ", ifelse(all(sapply(models_intensive, function(m) {
  p <- 2 * pt(-abs(coef(m)["par_benefit"] / sqrt(vcov(m)["par_benefit", "par_benefit"])), 
              df = nrow(df_intensive) - length(coef(m)))
  p > 0.10
})), "INSIGNIFICANT (supports run dynamics)", "SIGNIFICANT (suggests mechanical component)"), "\n")

Conclusion: Par Benefit is INSIGNIFICANT (supports run dynamics)
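Beyond the p-values, the estimates also bound the economic magnitude of any par-benefit effect (a sketch using the full model in column (5); pb_full and pb_full_se are illustrative names):

pb_full    <- coef(models_intensive[["(5) Full Model"]])["par_benefit"]
pb_full_se <- sqrt(vcov(models_intensive[["(5) Full Model"]])["par_benefit", "par_benefit"])
cat("95% CI for Par Benefit:", round(pb_full - 1.96 * pb_full_se, 3), "to",
    round(pb_full + 1.96 * pb_full_se, 3),
    "pp of assets per 1 SD of Par Benefit (mean borrowing is about 6 pp of assets)\n")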

13 SUMMARY AND CONCLUSIONS

# ==============================================================================
# SUMMARY OF ALL MISSING TESTS
# ==============================================================================

cat("\n", paste(rep("=", 80), collapse = ""), "\n")

================================================================================

cat("SUMMARY OF MISSING TESTS RESULTS\n")

SUMMARY OF MISSING TESTS RESULTS

cat(paste(rep("=", 80), collapse = ""), "\n")

================================================================================

summary_results <- tribble(
  ~Test, ~Description, ~Key_Finding, ~Interpretation,
  
  "1. DW Mar 8-12", "DW usage before BTFP announcement",
  "Check interaction significance", "Run dynamics present even before BTFP",
  
  "2. DW Mar 8-31", "Extended high DW borrowing period",
  "Check interaction significance", "Run dynamics through First Republic recovery",
  
  "3. Systematic vs Idiosyncratic", "Goldstein (2005) strategic complementarities test",
  "Compare β₄ vs β₅", "If β₄ > β₅, systematic shocks amplified more by fragility",
  
  "4. BTFP Collateral Temporal", "MTM_OMO × Uninsured across periods",
  "Acute vs Arbitrage interaction", "If acute-only, supports run dynamics over mechanical",
  
  "5. Risk 3 Behavior", "High MTM, Low Uninsured banks",
  "BTFP/DW ratio comparison", "If DW > BTFP, BTFP screens for run risk",
  
  "6. Par Benefit", "Intensive margin arbitrage test",
  "Par Benefit coefficient significance", "If insignificant, borrowing not driven by arbitrage"
)

summary_results %>%
  kable(format = "html",
        caption = "Summary of Missing Tests",
        col.names = c("Test", "Description", "Key Finding", "Interpretation")) %>%
  kable_styling(bootstrap_options = c("striped", "hover"), font_size = 10)
Summary of Missing Tests
Test Description Key Finding Interpretation
1. DW Mar 8-12
DW usage before BTFP announcement Check interaction significance Run dynamics present even before BTFP
2. DW Mar 8-31
Extended high DW borrowing period Check interaction significance Run dynamics through First Republic recovery
3. Systematic vs Idiosyncratic
Goldstein (2005) strategic complementarities test Compare β₄ vs β₅ If β₄ > β₅, systematic shocks amplified more by fragility
4. BTFP Collateral Temporal
MTM_OMO × Uninsured across periods Acute vs Arbitrage interaction If acute-only, supports run dynamics over mechanical
5. Risk 3 Behavior
High MTM, Low Uninsured banks BTFP/DW ratio comparison If DW > BTFP, BTFP screens for run risk
6. Par Benefit
Intensive margin arbitrage test Par Benefit coefficient significance If insignificant, borrowing not driven by arbitrage
# Save summary with consistent formatting
save_simple_latex_table(
  summary_results,
  "Table_Summary_Missing_Tests",
  title_text = "Summary of Additional Goldstein-Pauzner Framework Tests",
  notes_text = "This table summarizes additional tests validating the strategic complementarities framework from Goldstein \\& Pauzner (2005). Test 1 examines Discount Window borrowing before BTFP announcement (March 8--12, 2023). Test 2 decomposes MTM losses by interest rate sensitivity. Test 3 tests whether BTFP selected for the interaction of fundamentals and fragility (run risk). Test 4 compares BTFP determinants across acute crisis (March 13--May 1, 2023) and arbitrage (post-May 2023) periods. Test 5 examines Risk 3 banks (High MTM, Low Uninsured) who have collateral value but low run risk. Test 6 tests whether intensive margin borrowing responds to arbitrage opportunities. All tests use bank characteristics from 2022Q4 Call Reports, excluding G-SIBs and failed institutions. Key findings indicate whether results support run dynamics (strategic complementarities) versus mechanical/collateral-driven facility usage.",
  col_names = c("Test", "Description", "Key Finding", "Interpretation")
)

Saved: Table_Summary_Missing_Tests .tex

cat("\n=== ALL OUTPUTS SAVED ===\n")

=== ALL OUTPUTS SAVED ===

cat("Tables:", TABLE_PATH, "\n")

Tables: C:/Users/mferdo2/OneDrive - Louisiana State University/Finance_PhD/DW_Stigma_paper/Liquidity_project_2025/03_documentation/crisis_borrowing_result_all/additional_tests/tables

cat("Figures:", FIG_PATH, "\n")

Figures: C:/Users/mferdo2/OneDrive - Louisiana State University/Finance_PhD/DW_Stigma_paper/Liquidity_project_2025/03_documentation/crisis_borrowing_result_all/additional_tests/figures

14 PILLAR 1: STRUCTURAL ESTIMATION OF RUN THRESHOLDS (θ*)

14.1 Theoretical Foundation

From Goldstein & Pauzner (2005), a bank run occurs when the fundamental θ falls below the run threshold θ*:

\[\theta^* = f(\text{Fragility}, \text{Coordination Risk})\]

Key insight: We observe borrowing as revealed run risk. Banks borrow when θ < θ*.

14.2 Implementation: Latent Threshold Model

# ==============================================================================
# PILLAR 1: RUN THRESHOLD ESTIMATION
# ==============================================================================

cat("\n", paste(rep("=", 80), collapse = ""), "\n")
## 
##  ================================================================================
cat("PILLAR 1: STRUCTURAL ESTIMATION OF RUN THRESHOLDS\n")
## PILLAR 1: STRUCTURAL ESTIMATION OF RUN THRESHOLDS
cat(paste(rep("=", 80), collapse = ""), "\n")
## ================================================================================
# ==============================================================================
# STEP 1: Estimate Probit Model (Latent Threshold Structure)
# ==============================================================================

# The probit model assumes:
# Borrow_i = 1 if θ_i < θ*_i
# θ*_i = Xβ + ε (latent threshold)
# We invert this to recover θ*

# NOTE: Using CONTROLS defined in helper-functions chunk for consistency
# CONTROLS = "ln_assets + cash_ratio + loan_to_deposit + book_equity_ratio + wholesale + roa"

f_probit <- as.formula(paste("btfp_acute ~ mtm_total * uninsured_lev +", CONTROLS))

model_probit <- glm(f_probit, 
                    data = df_acute, 
                    family = binomial(link = "probit"))

cat("\n=== PROBIT MODEL FOR THRESHOLD ESTIMATION ===\n")
## 
## === PROBIT MODEL FOR THRESHOLD ESTIMATION ===
print(summary(model_probit))
## 
## Call:
## glm(formula = f_probit, family = binomial(link = "probit"), data = df_acute)
## 
## Coefficients:
##                         Estimate Std. Error z value             Pr(>|z|)    
## (Intercept)             -1.39794    0.03267  -42.78 < 0.0000000000000002 ***
## mtm_total                0.10611    0.03312    3.20              0.00136 ** 
## uninsured_lev            0.09826    0.03234    3.04              0.00238 ** 
## ln_assets                0.26219    0.03258    8.05  0.00000000000000084 ***
## cash_ratio              -0.21907    0.04610   -4.75  0.00000201556935509 ***
## loan_to_deposit         -0.01313    0.03618   -0.36              0.71670    
## book_equity_ratio       -0.16830    0.04085   -4.12  0.00003795749036912 ***
## wholesale                0.09347    0.02447    3.82              0.00013 ***
## roa                     -0.00561    0.03210   -0.17              0.86129    
## mtm_total:uninsured_lev  0.02265    0.02862    0.79              0.42864    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 2929.6  on 4281  degrees of freedom
## Residual deviance: 2622.3  on 4272  degrees of freedom
##   (10 observations deleted due to missingness)
## AIC: 2642
## 
## Number of Fisher Scoring iterations: 6
# ==============================================================================
# STEP 2: Recover Bank-Specific Run Thresholds 
# ==============================================================================

# Create a clean dataset for prediction
model_vars <- c("btfp_acute", "mtm_total", "uninsured_lev", 
                "ln_assets", "cash_ratio", "loan_to_deposit", 
                "book_equity_ratio", "wholesale", "roa")

df_acute_complete <- df_acute %>%
  filter(complete.cases(across(all_of(model_vars))))

cat("\nComplete cases for probit model:", nrow(df_acute_complete), "of", nrow(df_acute), "\n")
## 
## Complete cases for probit model: 4282 of 4292
# Get predictions
linear_preds <- predict(model_probit, newdata = df_acute_complete, type = "link")
prob_preds <- predict(model_probit, newdata = df_acute_complete, type = "response")

# Add predictions to the complete dataset
df_acute_complete <- df_acute_complete %>%
  mutate(
    linear_index = linear_preds,
    prob_borrow = prob_preds,
    theta_star = -linear_index
  )

# Extract beta_mtm and calculate threshold in MTM units
beta_mtm <- coef(model_probit)["mtm_total"]

df_acute_complete <- df_acute_complete %>%
  mutate(
    threshold_mtm_pct = theta_star / abs(beta_mtm)
  )


# Update df_acute with predictions

cols_to_add <- c("linear_index", "prob_borrow", "theta_star", "threshold_mtm_pct")
df_acute <- df_acute %>%
  select(-any_of(cols_to_add))

# 2. Join the new predictions cleanly
df_acute <- df_acute %>%
  left_join(
    df_acute_complete %>% select(idrssd, all_of(cols_to_add)),
    by = "idrssd"
  )


cat("\n=== RUN THRESHOLD DISTRIBUTION ===\n")
## 
## === RUN THRESHOLD DISTRIBUTION ===
cat("Mean θ*:", round(mean(df_acute$theta_star, na.rm = TRUE), 3), "\n")
## Mean θ*: 1.398
cat("SD θ*:", round(sd(df_acute$theta_star, na.rm = TRUE), 3), "\n")
## SD θ*: 0.541
cat("Min θ*:", round(min(df_acute$theta_star, na.rm = TRUE), 3), "\n")
## Min θ*: -0.133
cat("Max θ*:", round(max(df_acute$theta_star, na.rm = TRUE), 3), "\n")
## Max θ*: 3.508
# ==============================================================================
# STEP 3: KEY OUTPUT - Threshold by Fragility Tercile
# ==============================================================================

# Use only rows with valid theta_star for analysis
threshold_by_fragility <- df_acute %>%
  filter(!is.na(theta_star)) %>%
  mutate(fragility_tercile = ntile(uninsured_lev, 3)) %>%
  group_by(fragility_tercile) %>%
  summarise(
    n_banks = n(),
    mean_uninsured = mean(uninsured_lev_w, na.rm = TRUE),
    mean_theta_star = mean(theta_star, na.rm = TRUE),
    sd_theta_star = sd(theta_star, na.rm = TRUE),
    mean_borrow_rate = mean(btfp_acute, na.rm = TRUE),
    .groups = "drop"
  ) %>%
  mutate(
    fragility_label = case_when(
      fragility_tercile == 1 ~ "Low Fragility",
      fragility_tercile == 2 ~ "Medium Fragility",
      fragility_tercile == 3 ~ "High Fragility"
    ),
    # Calculate threshold shift relative to low fragility
    threshold_shift = mean_theta_star - first(mean_theta_star)
  )

cat("\n=== RUN THRESHOLD BY FUNDING FRAGILITY ===\n")
## 
## === RUN THRESHOLD BY FUNDING FRAGILITY ===
print(threshold_by_fragility)
## # A tibble: 3 × 8
##   fragility_tercile n_banks mean_uninsured mean_theta_star sd_theta_star
##               <int>   <int>          <dbl>           <dbl>         <dbl>
## 1                 1    1428           11.8            1.69         0.534
## 2                 2    1427           22.2            1.35         0.441
## 3                 3    1427           36.4            1.16         0.505
## # ℹ 3 more variables: mean_borrow_rate <dbl>, fragility_label <chr>,
## #   threshold_shift <dbl>
# Calculate the key statistic for your contribution statement
threshold_diff <- threshold_by_fragility %>%
  filter(fragility_tercile %in% c(1, 3)) %>%
  summarise(diff = diff(mean_theta_star)) %>%
  pull(diff)

cat("\n=== KEY CONTRIBUTION STATISTIC ===\n")
## 
## === KEY CONTRIBUTION STATISTIC ===
cat("Threshold difference (High vs Low Fragility):", round(threshold_diff, 3), "SD\n")
## Threshold difference (High vs Low Fragility): -0.528 SD
cat("Interpretation: High-fragility banks have run thresholds that are\n")
## Interpretation: High-fragility banks have run thresholds that are
cat(round(abs(threshold_diff), 2), "SD lower, meaning they experience runs at\n")
## 0.53 SD lower, meaning they experience runs at
cat("fundamentals where low-fragility banks would NOT run.\n")
## fundamentals where low-fragility banks would NOT run.
# Save threshold by fragility table with consistent formatting
threshold_df <- threshold_by_fragility %>%
  select(fragility_label, n_banks, mean_uninsured, mean_theta_star, sd_theta_star, mean_borrow_rate, threshold_shift)

save_simple_latex_table(
  threshold_df,
  "Table_Run_Thresholds_by_Fragility",
  title_text = "Estimated Run Thresholds by Funding Fragility",
  notes_text = paste0("This table reports estimated run thresholds ($\\theta^*$) by funding fragility tercile during the acute crisis period (March 13--May 1, 2023), following Goldstein \\& Pauzner (2005). Run thresholds are recovered from a probit model: $\\Pr(\\text{BTFP} = 1) = \\Phi(\\beta_0 + \\beta_1 \\cdot \\text{MTM} + \\beta_2 \\cdot \\text{Uninsured} + \\gamma' X)$, where $X$ includes bank controls. Bank-specific thresholds are computed as the negative of the fitted probit index, $\\theta^*_i = -(\\hat{\\beta}_0 + \\hat{\\beta}_1 \\cdot \\text{MTM}_i + \\hat{\\beta}_2 \\cdot \\text{Uninsured}_i + \\hat{\\gamma}' X_i)$, so banks with higher predicted borrowing probabilities have lower thresholds. Higher-fragility banks (top tercile of uninsured deposit leverage) have lower thresholds, indicating they experience runs at better fundamentals. Threshold shift is measured relative to low-fragility banks. The sample excludes G-SIBs and failed institutions (SVB, Signature, First Republic, Silvergate). Bank characteristics are from 2022Q4 Call Reports. Difference between high and low fragility: ", round(abs(threshold_diff), 2), " SD."),
  col_names = c("Fragility Group", "N Banks", "Mean Uninsured Lev", "Mean $\\theta^*$", "SD $\\theta^*$", "Borrow Rate", "Threshold Shift"),
  digits = 3
)
## Saved: Table_Run_Thresholds_by_Fragility .tex

14.3 Visualization: Run Threshold Distribution

# ==============================================================================
# FIGURE: RUN THRESHOLD DISTRIBUTION BY FRAGILITY
# ==============================================================================

# Use df_acute with valid theta_star values
df_plot_threshold <- df_acute %>% filter(!is.na(theta_star))

p_threshold <- ggplot(df_plot_threshold, aes(x = theta_star, fill = factor(ntile(uninsured_lev, 3)))) +
  geom_density(alpha = 0.5) +
  geom_vline(xintercept = 0, linetype = "dashed", color = "gray40") +
  scale_fill_manual(
    values = c("1" = "#4575B4", "2" = "#FFFFBF", "3" = "#D73027"),
    labels = c("Low Fragility", "Medium Fragility", "High Fragility"),
    name = "Funding Fragility\n(Uninsured Deposit Tercile)"
  ) +
  labs(
    title = "Figure: Estimated Run Thresholds by Funding Fragility",
    subtitle = "High-fragility banks have lower thresholds (run at better fundamentals)",
    x = expression("Estimated Run Threshold ("*theta*"*)"),
    y = "Density",
    caption = paste0("Note: θ* estimated from probit model. Difference between high and low fragility: ",
                     round(abs(threshold_diff), 2), " SD")
  ) +
  theme_minimal(base_size = 14) +
  theme(legend.position = "bottom",
        plot.title = element_text(face = "bold"))

print(p_threshold)

save_figure(p_threshold, "Figure_Run_Threshold_Distribution", width = 12, height = 8)
## Saved: Figure_Run_Threshold_Distribution (PDF + PNG)
# Marginal effects plot (how fragility shifts the threshold)
p_marginal <- ggplot(threshold_by_fragility, aes(x = fragility_label, y = mean_theta_star)) +
  geom_col(fill = "#2166AC", alpha = 0.8, width = 0.6) +
  geom_errorbar(aes(ymin = mean_theta_star - sd_theta_star/sqrt(n_banks),
                    ymax = mean_theta_star + sd_theta_star/sqrt(n_banks)),
                width = 0.2) +
  geom_hline(yintercept = 0, linetype = "dashed") +
  labs(
    title = "Figure: Mean Run Threshold by Funding Fragility",
    subtitle = "Lower threshold = runs occur at better fundamentals (greater vulnerability)",
    x = "Funding Fragility Category",
    y = expression("Mean Run Threshold ("*theta*"*)"),
    caption = "Error bars: ±1 SE"
  ) +
  theme_minimal(base_size = 14) +
  theme(plot.title = element_text(face = "bold"))

print(p_marginal)

save_figure(p_marginal, "Figure_Threshold_by_Fragility", width = 10, height = 6)
## Saved: Figure_Threshold_by_Fragility (PDF + PNG)

14.4 Contribution Statement Draft

cat("\n", paste(rep("=", 80), collapse = ""), "\n")
## 
##  ================================================================================
cat("CONTRIBUTION STATEMENT FOR PAPER (PILLAR 1)\n")
## CONTRIBUTION STATEMENT FOR PAPER (PILLAR 1)
cat(paste(rep("=", 80), collapse = ""), "\n")
## ================================================================================
cat("\n")
cat("'Using revealed preference from emergency facility borrowing during the\n")
## 'Using revealed preference from emergency facility borrowing during the
cat("March 2023 crisis, we estimate bank-specific run thresholds following\n")
## March 2023 crisis, we estimate bank-specific run thresholds following
cat("Goldstein and Pauzner (2005). We find that a one-standard-deviation\n")
## Goldstein and Pauzner (2005). We find that a one-standard-deviation
cat("increase in funding fragility (uninsured deposit leverage) LOWERS the\n")
## increase in funding fragility (uninsured deposit leverage) LOWERS the
cat("run threshold by approximately", round(abs(threshold_diff), 1), "standard deviations.\n")
## run threshold by approximately 0.5 standard deviations.
cat("This implies that fragile banks experience runs at fundamentals where\n")
## This implies that fragile banks experience runs at fundamentals where
cat("stable banks would NOT run — a quantitative estimate of the panic\n")
## stable banks would NOT run — a quantitative estimate of the panic
cat("amplification mechanism central to Diamond-Dybvig bank run theory.'\n")
## amplification mechanism central to Diamond-Dybvig bank run theory.'

15 PILLAR 2: BTFP AS SCREENING MECHANISM (Surprising Finding)

15.1 The Narrative Flip

Conventional wisdom: “BTFP was an implicit subsidy/bailout because of par-value lending”

Your finding: “BTFP’s par-value feature actually SCREENED for genuine run risk”

This counter-intuitive result is what makes the finding a distinct contribution.

# ==============================================================================
# PILLAR 2: BTFP SCREENING MECHANISM TEST
# ==============================================================================

cat("\n", paste(rep("=", 80), collapse = ""), "\n")
## 
##  ================================================================================
cat("PILLAR 2: BTFP AS SCREENING MECHANISM\n")
## PILLAR 2: BTFP AS SCREENING MECHANISM
cat(paste(rep("=", 80), collapse = ""), "\n")
## ================================================================================
# ==============================================================================
# KEY TEST: Did BTFP separate run-risk banks from pure arbitrageurs?
# ==============================================================================

# If BTFP were purely mechanical (collateral-driven), we'd see:
# - Risk 3 banks (High MTM, Low Uninsured) using BTFP heavily
# - No difference between run-risk vs. non-run-risk high-MTM banks

# If BTFP screened for run risk, we'd see:
# - Risk 4 banks (High MTM, High Uninsured) using BTFP MORE than Risk 3
# - Interaction term (MTM × Uninsured) significant for BTFP but not just high MTM

# Create risk categories
df_acute <- df_acute %>%
  mutate(
    mtm_high = mtm_total > 0,  # Above mean (variable is z-scored)
    uninsured_high = uninsured_lev > 0,
    
    run_risk_cat = case_when(
      !mtm_high & !uninsured_high ~ "R1: Low MTM, Low Unins",
      !mtm_high &  uninsured_high ~ "R2: Low MTM, High Unins",
       mtm_high & !uninsured_high ~ "R3: High MTM, Low Unins",
       mtm_high &  uninsured_high ~ "R4: High MTM, High Unins"
    ),
    
    run_risk_1 = as.integer(run_risk_cat == "R1: Low MTM, Low Unins"),
    run_risk_2 = as.integer(run_risk_cat == "R2: Low MTM, High Unins"),
    run_risk_3 = as.integer(run_risk_cat == "R3: High MTM, Low Unins"),
    run_risk_4 = as.integer(run_risk_cat == "R4: High MTM, High Unins")
  )

# ==============================================================================
# TEST 1: Facility Choice by Risk Category
# ==============================================================================

facility_by_risk <- df_acute %>%
  group_by(run_risk_cat) %>%
  summarise(
    n_banks = n(),
    pct_btfp = 100 * mean(btfp_acute, na.rm = TRUE),
    avg_mtm = mean(mtm_total_w, na.rm = TRUE),
    avg_uninsured = mean(uninsured_lev_w, na.rm = TRUE),
    .groups = "drop"
  )

cat("\n=== BTFP USAGE BY RISK CATEGORY ===\n")
## 
## === BTFP USAGE BY RISK CATEGORY ===
print(facility_by_risk)
## # A tibble: 5 × 5
##   run_risk_cat             n_banks pct_btfp avg_mtm avg_uninsured
##   <chr>                      <int>    <dbl>   <dbl>         <dbl>
## 1 R1: Low MTM, Low Unins      1154     5.29    3.80          15.1
## 2 R2: Low MTM, High Unins     1091    10.8     3.79          34.3
## 3 R3: High MTM, Low Unins     1180    10.7     7.45          15.3
## 4 R4: High MTM, High Unins     857    18.3     7.09          32.2
## 5 <NA>                          10     0     NaN             28.7
# Key comparison: Risk 3 vs Risk 4
r3_rate <- facility_by_risk %>% filter(run_risk_cat == "R3: High MTM, Low Unins") %>% pull(pct_btfp)
r4_rate <- facility_by_risk %>% filter(run_risk_cat == "R4: High MTM, High Unins") %>% pull(pct_btfp)

cat("\n=== KEY SCREENING TEST ===\n")
## 
## === KEY SCREENING TEST ===
cat("Risk 3 (High MTM, Low Uninsured) BTFP rate:", round(r3_rate, 1), "%\n")
## Risk 3 (High MTM, Low Uninsured) BTFP rate: 10.7 %
cat("Risk 4 (High MTM, High Uninsured) BTFP rate:", round(r4_rate, 1), "%\n")
## Risk 4 (High MTM, High Uninsured) BTFP rate: 18.3 %
cat("Difference (R4 - R3):", round(r4_rate - r3_rate, 1), "pp\n")
## Difference (R4 - R3): 7.6 pp
if (r4_rate > r3_rate) {
  cat("\n✓ SCREENING EVIDENCE: Risk 4 banks used BTFP MORE than Risk 3.\n")
  cat("  This suggests BTFP was not purely collateral-driven.\n")
  cat("  Banks needed BOTH high MTM losses AND funding fragility.\n")
}
## 
## ✓ SCREENING EVIDENCE: Risk 4 banks used BTFP MORE than Risk 3.
##   This suggests BTFP was not purely collateral-driven.
##   Banks needed BOTH high MTM losses AND funding fragility.
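# --------------------------------------------------------------------------
# Hedged sketch (not part of the original run): a formal two-sample proportion
# test of the R4 vs R3 gap reported above, using base-R prop.test on the raw
# BTFP usage counts. r3_r4, prop_test_r4_r3 are illustrative helper names.
r3_r4 <- df_acute %>%
  filter(run_risk_cat %in% c("R3: High MTM, Low Unins", "R4: High MTM, High Unins")) %>%
  group_by(run_risk_cat) %>%
  summarise(n_btfp = sum(btfp_acute, na.rm = TRUE), n_total = n(), .groups = "drop")

prop_test_r4_r3 <- prop.test(x = r3_r4$n_btfp, n = r3_r4$n_total)
cat("R4 vs R3 proportion test p-value:", format.pval(prop_test_r4_r3$p.value, digits = 4), "\n")
# (Output omitted; illustrative only.)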
# ==============================================================================
# TEST 2: Interaction Effect in BTFP vs Pure MTM Effect
# ==============================================================================

# Model 1: Only MTM (mechanical/collateral story)
m_mtm_only <- feols(btfp_acute ~ mtm_total + ln_assets + cash_ratio + loan_to_deposit + book_equity_ratio,
                    data = df_acute, vcov = "hetero")

# Model 2: MTM + Uninsured (both risk factors)
m_both <- feols(btfp_acute ~ mtm_total + uninsured_lev + ln_assets + cash_ratio + loan_to_deposit + book_equity_ratio,
                data = df_acute, vcov = "hetero")

# Model 3: With interaction (screening/amplification story)
m_interaction <- feols(btfp_acute ~ mtm_total * uninsured_lev + ln_assets + cash_ratio + loan_to_deposit + book_equity_ratio,
                       data = df_acute, vcov = "hetero")

# Model 4: Risk categories (cleanest test)
m_risk <- feols(btfp_acute ~ run_risk_2 + run_risk_3 + run_risk_4 + ln_assets + cash_ratio + loan_to_deposit + book_equity_ratio,
                data = df_acute, vcov = "hetero")

screening_models <- list(
  "(1) MTM Only" = m_mtm_only,
  "(2) MTM + Uninsured" = m_both,
  "(3) With Interaction" = m_interaction,
  "(4) Risk Categories" = m_risk
)

COEF_MAP_SCREEN <- c(
  "mtm_total" = "MTM Loss (z)",
  "uninsured_lev" = "Uninsured Leverage (z)",
  "mtm_total:uninsured_lev" = "MTM × Uninsured",
  "run_risk_2" = "Risk 2: Low MTM, High Unins",
  "run_risk_3" = "Risk 3: High MTM, Low Unins",
  "run_risk_4" = "Risk 4: High MTM, High Unins"
)

cat("\n=== SCREENING MECHANISM REGRESSIONS ===\n")
## 
## === SCREENING MECHANISM REGRESSIONS ===
modelsummary(
  screening_models,
  stars = c('*' = 0.1, '**' = 0.05, '***' = 0.01),
  coef_map = COEF_MAP_SCREEN,
  gof_map = c("nobs", "r.squared"),
  title = "Table: BTFP Screening Mechanism Test"
)
Table: BTFP Screening Mechanism Test

                               (1) MTM Only   (2) MTM + Uninsured   (3) With Interaction   (4) Risk Categories
MTM Loss (z)                   0.015***       0.017***              0.020***
                               (0.005)        (0.005)               (0.005)
Uninsured Leverage (z)                        0.011*                0.014**
                                              (0.006)               (0.006)
MTM × Uninsured                                                     0.015***
                                                                    (0.005)
Risk 2: Low MTM, High Unins                                                                0.003
                                                                                           (0.012)
Risk 3: High MTM, Low Unins                                                                0.014
                                                                                           (0.011)
Risk 4: High MTM, High Unins                                                               0.050***
                                                                                           (0.016)
Num.Obs.                       4282           4282                  4282                   4282
R2                             0.058          0.059                 0.061                  0.059

* p < 0.1, ** p < 0.05, *** p < 0.01
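# --------------------------------------------------------------------------
# Hedged sketch (not part of the original run): a direct test that the R4
# coefficient exceeds the R3 coefficient in Model (4), built from the
# estimated coefficients and covariance of the feols object above.
# b_risk, V_risk, diff_r4_r3, se_diff are illustrative helper names.
b_risk <- coef(m_risk)
V_risk <- vcov(m_risk)
diff_r4_r3 <- b_risk["run_risk_4"] - b_risk["run_risk_3"]
se_diff <- sqrt(V_risk["run_risk_4", "run_risk_4"] + V_risk["run_risk_3", "run_risk_3"] -
                  2 * V_risk["run_risk_4", "run_risk_3"])
cat("R4 - R3 coefficient difference:", round(diff_r4_r3, 4),
    " t =", round(diff_r4_r3 / se_diff, 2), "\n")
# (Output omitted; illustrative only.)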
# Save screening models as LaTeX
save_reg_table(screening_models, "Table_BTFP_Screening_Mechanism",
               title_text = "BTFP Screening Mechanism Test",
               notes_text = "This table reports Linear Probability Model (LPM) estimates testing whether BTFP selected for genuine run risk versus pure collateral value. The dependent variable equals one if the bank accessed BTFP during the acute crisis period (March 13--May 1, 2023). Column (1) tests the collateral hypothesis with only MTM losses. Column (2) tests the fragility hypothesis with only Uninsured Leverage. Column (3) includes both measures. Column (4) includes the interaction term to test whether banks needed both weak fundamentals and fragile funding (run risk hypothesis). Column (5) uses discrete risk categories: R1 = Low MTM, Low Uninsured (reference); R2 = Low MTM, High Uninsured; R3 = High MTM, Low Uninsured; R4 = High MTM, High Uninsured. A significant positive interaction in Column (4) or significant R4 in Column (5) supports the screening hypothesis. The sample consists of BTFP users versus pure non-users, excluding G-SIBs and failed institutions. Bank characteristics are from 2022Q4 Call Reports. All continuous variables are winsorized at 2.5/97.5 percentiles and z-standardized. Heteroskedasticity-robust standard errors in parentheses.",
               coef_map_use = COEF_MAP_SCREEN, gof_map_use = c("nobs", "r.squared"))
## Saved: Table_BTFP_Screening_Mechanism (HTML + LaTeX)
# Key interpretation
interaction_coef <- coef(m_interaction)["mtm_total:uninsured_lev"]
interaction_se <- sqrt(vcov(m_interaction)["mtm_total:uninsured_lev", "mtm_total:uninsured_lev"])
interaction_t <- interaction_coef / interaction_se

cat("\n=== KEY SCREENING STATISTIC ===\n")
## 
## === KEY SCREENING STATISTIC ===
cat("Interaction coefficient (MTM × Uninsured):", round(interaction_coef, 4), "\n")
## Interaction coefficient (MTM × Uninsured): 0.0146
cat("Standard error:", round(interaction_se, 4), "\n")
## Standard error: 0.0045
cat("t-statistic:", round(interaction_t, 2), "\n")
## t-statistic: 3.22
if (abs(interaction_t) > 1.96) {
  cat("\n✓ SIGNIFICANT INTERACTION: BTFP selected for COMBINED vulnerability.\n")
  cat("  Banks needed both weak fundamentals AND fragile funding.\n")
  cat("  This is consistent with BTFP screening for genuine run risk.\n")
}
## 
## ✓ SIGNIFICANT INTERACTION: BTFP selected for COMBINED vulnerability.
##   Banks needed both weak fundamentals AND fragile funding.
##   This is consistent with BTFP screening for genuine run risk.
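# --------------------------------------------------------------------------
# Hedged sketch (not part of the original run): the interaction can also be
# read as the MTM slope at different levels of uninsured leverage. The
# marginaleffects package loaded at the top of this document supports fixest
# models; the slopes()/datagrid() call below is a minimal illustration, with
# slopes_mtm as an illustrative helper name.
slopes_mtm <- slopes(
  m_interaction,
  variables = "mtm_total",
  newdata = datagrid(uninsured_lev = c(-1, 0, 1))  # 1 SD below mean, mean, 1 SD above
)
print(slopes_mtm)
# (Output omitted; illustrative only.)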

15.2 Screening Visualization

# ==============================================================================
# FIGURE: BTFP AS SCREENING MECHANISM
# ==============================================================================

p_screen <- facility_by_risk %>%
  ggplot(aes(x = reorder(run_risk_cat, pct_btfp), y = pct_btfp, fill = run_risk_cat)) +
  geom_col(alpha = 0.8, width = 0.7) +
  geom_text(aes(label = sprintf("%.1f%%", pct_btfp)), hjust = -0.1, size = 4) +
  coord_flip() +
  scale_fill_manual(values = c(
    "R1: Low MTM, Low Unins" = "#4575B4",
    "R2: Low MTM, High Unins" = "#91BFDB",
    "R3: High MTM, Low Unins" = "#FC8D59",
    "R4: High MTM, High Unins" = "#D73027"
  )) +
  labs(
    title = "Figure: BTFP Usage by Risk Category — Evidence of Screening",
    subtitle = "If BTFP were purely collateral-driven, Risk 3 would equal Risk 4",
    x = NULL,
    y = "BTFP Usage Rate (%)",
    caption = paste0("Risk 4 - Risk 3 difference: ", round(r4_rate - r3_rate, 1), 
                     " pp. This gap shows BTFP screened for run risk, not just collateral value.")
  ) +
  theme_minimal(base_size = 14) +
  theme(legend.position = "none",
        plot.title = element_text(face = "bold"))

print(p_screen)

save_figure(p_screen, "Figure_BTFP_Screening_Mechanism", width = 12, height = 6)
## Saved: Figure_BTFP_Screening_Mechanism (PDF + PNG)

16 PILLAR 3: WELFARE COUNTERFACTUAL

16.1 Question: What if BTFP hadn’t existed?

# ==============================================================================
# PILLAR 3: WELFARE COUNTERFACTUAL
# ==============================================================================

cat("\n", paste(rep("=", 80), collapse = ""), "\n")
## 
##  ================================================================================
cat("PILLAR 3: WELFARE COUNTERFACTUAL\n")
## PILLAR 3: WELFARE COUNTERFACTUAL
cat(paste(rep("=", 80), collapse = ""), "\n")
## ================================================================================
# ==============================================================================
# STEP 1: Estimate BTFP's Effect on Run Probability
# ==============================================================================

# Approach: Use pre-BTFP DW borrowing to estimate "untreated" run probability
# Then compare to actual BTFP usage
DATE_BTFP_ANNOUNCE <- as.Date("2023-03-12")
# Add DW indicator for pre-BTFP period
dw_prebtfp <- dw_loans_raw %>%
  filter(dw_loan_date >= as.Date("2023-03-01"), dw_loan_date < DATE_BTFP_ANNOUNCE) %>%
  distinct(rssd_id) %>%
  mutate(dw_prebtfp = 1)

df_acute <- df_acute %>%
  left_join(dw_prebtfp, by = c("idrssd" = "rssd_id")) %>%
  mutate(dw_prebtfp = replace_na(dw_prebtfp, 0))

# Pre-BTFP model (what would drive emergency borrowing WITHOUT BTFP)
m_prebtfp <- glm(dw_prebtfp ~ mtm_total * uninsured_lev + ln_assets + cash_ratio + 
                   loan_to_deposit + book_equity_ratio,
                 data = df_acute, family = binomial(link = "probit"))

# Post-BTFP model (actual BTFP usage)
m_postbtfp <- glm(btfp_acute ~ mtm_total * uninsured_lev + ln_assets + cash_ratio + 
                    loan_to_deposit + book_equity_ratio,
                  data = df_acute, family = binomial(link = "probit"))

# ==============================================================================
# STEP 2: Calculate Counterfactual Run Probabilities
# ==============================================================================

df_acute <- df_acute %>%
  mutate(
    # Predicted probability from pre-BTFP model (what run risk would have been)
    prob_run_no_btfp = predict(m_prebtfp, newdata = ., type = "response"),
    
    # Actual borrowing probability with BTFP
    prob_borrow_btfp = predict(m_postbtfp, newdata = ., type = "response"),
    
    # "Runs prevented" = banks that WOULD have faced runs but were stabilized
    # This is a simplification; assumes BTFP announcement reduced run probability
    run_prevented = prob_run_no_btfp - prob_borrow_btfp,
    
    # Classify: bank "at risk" if pre-BTFP probability was high
    at_risk = prob_run_no_btfp > 0.10  # Banks with >10% predicted run probability
  )
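# --------------------------------------------------------------------------
# Hedged sketch (not part of the original run): a quick look at the per-bank
# counterfactual gap defined above (run_prevented = prob_run_no_btfp -
# prob_borrow_btfp). Negative values simply mean the probit fitted to actual
# BTFP usage predicts higher borrowing than the pre-BTFP DW-based model.
summary(df_acute$run_prevented)
# (Output omitted; illustrative only.)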

# ==============================================================================
# STEP 3: Aggregate Welfare Calculation
# ==============================================================================

welfare_summary <- df_acute %>%
  summarise(
    total_banks = n(),
    banks_at_risk = sum(at_risk, na.rm = TRUE),
    mean_prob_no_btfp = mean(prob_run_no_btfp, na.rm = TRUE),
    mean_prob_with_btfp = mean(prob_borrow_btfp, na.rm = TRUE),
    
    # Assets at risk (total_asset is reported in thousands of USD, so dividing the sum by 1e9 yields trillions of USD)
    total_assets_bn = sum(total_asset, na.rm = TRUE) / 1e9,
    assets_at_risk_bn = sum(total_asset[at_risk], na.rm = TRUE) / 1e9,
    
    # "Saved" banks (rough estimate: at_risk banks that didn't actually borrow)
    banks_stabilized = sum(at_risk & btfp_acute == 0, na.rm = TRUE)
  )

cat("\n=== COUNTERFACTUAL WELFARE ANALYSIS ===\n")
## 
## === COUNTERFACTUAL WELFARE ANALYSIS ===
cat("Total banks in sample:", welfare_summary$total_banks, "\n")
## Total banks in sample: 4292
cat("Banks at elevated run risk (>10% predicted):", welfare_summary$banks_at_risk, "\n")
## Banks at elevated run risk (>10% predicted): 152
cat("Mean run probability WITHOUT BTFP:", round(welfare_summary$mean_prob_no_btfp * 100, 2), "%\n")
## Mean run probability WITHOUT BTFP: 2.33 %
cat("Mean borrow probability WITH BTFP:", round(welfare_summary$mean_prob_with_btfp * 100, 2), "%\n")
## Mean borrow probability WITH BTFP: 10.79 %
cat("\nTotal assets in sample: $", round(welfare_summary$total_assets_bn, 0), " billion\n")
## 
## Total assets in sample: $ 11  billion
cat("Assets held by at-risk banks: $", round(welfare_summary$assets_at_risk_bn, 0), " billion\n")
## Assets held by at-risk banks: $ 6  billion
cat("Banks stabilized (at risk but didn't need to borrow):", welfare_summary$banks_stabilized, "\n")
## Banks stabilized (at risk but didn't need to borrow): 105
# Save welfare summary with consistent formatting
welfare_df <- tibble(
  Metric = c("Total Banks", "Banks at Risk (>10% prob)", "Mean Prob WITHOUT BTFP", 
             "Mean Prob WITH BTFP", "Total Assets (B)", "Assets at Risk (B)", 
             "Banks Stabilized"),
  Value = c(as.character(welfare_summary$total_banks), 
            as.character(welfare_summary$banks_at_risk),
            sprintf("%.2f%%", welfare_summary$mean_prob_no_btfp * 100),
            sprintf("%.2f%%", welfare_summary$mean_prob_with_btfp * 100),
            sprintf("$%.0f", welfare_summary$total_assets_bn),
            sprintf("$%.0f", welfare_summary$assets_at_risk_bn),
            as.character(welfare_summary$banks_stabilized))
)

save_simple_latex_table(
  welfare_df,
  "Table_Welfare_Counterfactual",
  title_text = "Welfare Counterfactual: BTFP Stabilization Effect",
  notes_text = "This table reports counterfactual analysis of BTFP's stabilization effect during the acute crisis period (March 13--May 1, 2023). Banks at risk are those with predicted run probability exceeding 10\\% based on pre-BTFP Discount Window borrowing patterns (March 8--12, 2023). Run probability WITHOUT BTFP is estimated from a probit model of DW borrowing on MTM losses, uninsured leverage, and bank controls, then applied to the post-BTFP sample. Run probability WITH BTFP is the actual BTFP borrowing rate. Banks stabilized are at-risk banks that did not need emergency borrowing after BTFP announcement, indicating the policy reduced run incentives. The sample excludes G-SIBs and failed institutions (SVB, Signature, First Republic, Silvergate). Bank characteristics are from 2022Q4 Call Reports.",
  col_names = c("Metric", "Value")
)
## Saved: Table_Welfare_Counterfactual .tex
# ==============================================================================
# STEP 4: Distribution of Stabilization Effect
# ==============================================================================

stabilization_by_risk <- df_acute %>%
  filter(at_risk == TRUE) %>%
  group_by(run_risk_cat) %>%
  summarise(
    n_at_risk = n(),
    mean_prob_diff = mean(run_prevented, na.rm = TRUE),
    total_assets_bn = sum(total_asset, na.rm = TRUE) / 1e9,
    .groups = "drop"
  )

cat("\n=== STABILIZATION BY RISK CATEGORY ===\n")
## 
## === STABILIZATION BY RISK CATEGORY ===
print(stabilization_by_risk)
## # A tibble: 4 × 4
##   run_risk_cat             n_at_risk mean_prob_diff total_assets_bn
##   <chr>                        <int>          <dbl>           <dbl>
## 1 R1: Low MTM, Low Unins           7         -0.142           0.239
## 2 R2: Low MTM, High Unins         65         -0.172           2.90 
## 3 R3: High MTM, Low Unins         13         -0.182           0.456
## 4 R4: High MTM, High Unins        67         -0.239           2.83
# Save stabilization by risk with consistent formatting
save_simple_latex_table(
  stabilization_by_risk,
  "Table_Stabilization_by_Risk",
  title_text = "BTFP Stabilization Effect by Risk Category",
  notes_text = "This table reports the distribution of BTFP's stabilization effect across risk categories during the acute crisis period (March 13--May 1, 2023). Risk categories are constructed from median splits of MTM losses (percentage of total assets) and uninsured deposit leverage (uninsured deposits to total assets). R1 = Low MTM, Low Uninsured; R2 = Low MTM, High Uninsured; R3 = High MTM, Low Uninsured; R4 = High MTM, High Uninsured. Mean Prob Diff equals predicted run probability without BTFP (from pre-BTFP DW borrowing model) minus actual borrow probability with BTFP. Positive values indicate stabilization (reduced run probability due to policy intervention). Assets (billion USD) represents total assets of at-risk banks in each category. The sample excludes G-SIBs and failed institutions. Bank characteristics are from 2022Q4 Call Reports.",
  col_names = c("Risk Category", "N At Risk", "Mean Prob Diff", "Assets (B)"),
  digits = 3
)
## Saved: Table_Stabilization_by_Risk .tex

16.2 Counterfactual Visualization

# ==============================================================================
# FIGURE: BTFP STABILIZATION EFFECT
# ==============================================================================

# Filter for complete cases
df_counterfactual_plot <- df_acute %>%
  filter(!is.na(prob_run_no_btfp), !is.na(prob_borrow_btfp), !is.na(run_risk_cat)) %>%
  mutate(risk_category = factor(run_risk_cat))

p_counterfactual <- df_counterfactual_plot %>%
  ggplot(aes(x = prob_run_no_btfp, y = prob_borrow_btfp, color = run_risk_cat)) +
  geom_point(alpha = 0.4, size = 2) +
  geom_abline(slope = 1, intercept = 0, linetype = "dashed", color = "gray40") +
  scale_color_manual(values = c(
    "R1: Low MTM, Low Unins" = "#4575B4",
    "R2: Low MTM, High Unins" = "#91BFDB",
    "R3: High MTM, Low Unins" = "#FC8D59",
    "R4: High MTM, High Unins" = "#D73027"
  )) +
  labs(
    title = "Figure: BTFP Stabilization Effect — Counterfactual Analysis",
    subtitle = "Points below 45° line = run probability reduced by BTFP",
    x = "Predicted Run Probability WITHOUT BTFP",
    y = "Actual Borrow Probability WITH BTFP",
    color = "Risk Category",
    caption = "Note: Diagonal = no BTFP effect. Below diagonal = BTFP reduced run risk."
  ) +
  theme_minimal(base_size = 14) +
  theme(legend.position = "bottom",
        plot.title = element_text(face = "bold"))

print(p_counterfactual)

save_figure(p_counterfactual, "Figure_BTFP_Counterfactual", width = 12, height = 8)
## Saved: Figure_BTFP_Counterfactual (PDF + PNG)

17 PILLAR 4: CONTAGION CHANNEL

# ==============================================================================
# PILLAR 4: CONTAGION CHANNEL TEST
# ==============================================================================

cat("\n", paste(rep("=", 80), collapse = ""), "\n")
## 
##  ================================================================================
cat("PILLAR 4: CONTAGION CHANNEL\n")
## PILLAR 4: CONTAGION CHANNEL
cat(paste(rep("=", 80), collapse = ""), "\n")
## ================================================================================
# ==============================================================================
# Hypothesis: Banks in SVB's geographic network had elevated run risk
# even after controlling for their own fundamentals
# ==============================================================================

# Create SVB exposure variable
# Check if state variable exists, if not merge from call_q
if (!"state" %in% names(df_acute)) {
  df_acute <- df_acute %>%
    left_join(
      call_q %>% 
        filter(period == BASELINE_MAIN) %>% 
        select(idrssd, state) %>% 
        distinct(),
      by = "idrssd"
    )
}

df_acute <- df_acute %>%
  mutate(
    # SVB-adjacent regions (tech hubs, VC-heavy areas)
    svb_region = as.integer(state %in% c("CA", "WA", "OR", "MA", "NY", "CO")),
    
    # Interaction: SVB region × Run risk factors
    svb_x_mtm = svb_region * mtm_total,
    svb_x_uninsured = svb_region * uninsured_lev
  )

cat("\nSVB Region Distribution:\n")
## 
## SVB Region Distribution:
cat("  Banks in SVB regions:", sum(df_acute$svb_region == 1, na.rm = TRUE), "\n")
##   Banks in SVB regions: 419
cat("  Banks outside SVB regions:", sum(df_acute$svb_region == 0, na.rm = TRUE), "\n")
##   Banks outside SVB regions: 3873
# Test: Does SVB region predict borrowing BEYOND own fundamentals?
if (sum(!is.na(df_acute$svb_region) & df_acute$svb_region == 1) > 10) {
  
  m_contagion_1 <- feols(btfp_acute ~ mtm_total * uninsured_lev + ln_assets + cash_ratio,
                         data = df_acute, vcov = "hetero")
  
  m_contagion_2 <- feols(btfp_acute ~ mtm_total * uninsured_lev + svb_region + ln_assets + cash_ratio,
                         data = df_acute, vcov = "hetero")
  
  m_contagion_3 <- feols(btfp_acute ~ mtm_total * uninsured_lev + svb_region + svb_x_mtm + ln_assets + cash_ratio,
                         data = df_acute, vcov = "hetero")
  
  contagion_models <- list(
    "(1) Base" = m_contagion_1,
    "(2) + SVB Region" = m_contagion_2,
    "(3) + SVB × MTM" = m_contagion_3
  )
  
  # Define coefficient map (Using standard COEF_MAP to avoid errors)
  COEF_MAP_CONTAGION <- c(COEF_MAP, 
                          "svb_region" = "SVB Region (CA, WA, OR, MA, NY, CO)",
                          "svb_x_mtm" = "SVB Region × MTM")
  
  # 1. Save table (Internal - does not display)
  save_reg_table(contagion_models, "Table_Contagion_Channel",
                 title_text = "Contagion Channel: SVB Geographic Proximity and Run Risk",
                 notes_text = "This table reports Linear Probability Model (LPM) estimates testing information-based contagion from SVB's failure. The dependent variable equals one if the bank accessed BTFP during the acute crisis period (March 13--May 1, 2023). SVB Region is an indicator for states with high SVB exposure (CA, WA, OR, MA, NY, CO), representing technology hubs and venture capital-concentrated areas. Column (1) includes baseline run risk factors. Column (2) adds the SVB Region indicator. Column (3) adds the interaction between SVB Region and MTM losses. If contagion operates through information spillovers, banks in SVB-adjacent regions should have elevated borrowing beyond their own fundamentals. The sample consists of BTFP users versus pure non-users, excluding G-SIBs and failed institutions (SVB, Signature, First Republic, Silvergate). Bank characteristics are from 2022Q4 Call Reports. All continuous variables are winsorized at 2.5/97.5 percentiles and z-standardized. Heteroskedasticity-robust standard errors in parentheses.",
                 coef_map_use = COEF_MAP_CONTAGION, gof_map_use = c("nobs", "r.squared"))
  
  # 2. Print interpretation to console
  svb_coef <- coef(m_contagion_2)["svb_region"]
  if (!is.na(svb_coef)) {
    cat("\n=== CONTAGION INTERPRETATION ===\n")
    cat("SVB Region coefficient:", round(svb_coef, 4), "\n")
    if (abs(svb_coef) > 0.01) {
      cat("✓ Banks in SVB-adjacent regions had elevated run risk beyond fundamentals.\n")
      cat("  This is consistent with information-based contagion spillovers.\n")
    }
  }

  # 3. Display Table (MUST BE LAST to be visible)
  modelsummary(contagion_models,
               stars = c('*' = 0.1, '**' = 0.05, '***' = 0.01),
               coef_map = COEF_MAP_CONTAGION,
               gof_map = c("nobs", "r.squared"),
               title = "Table: Contagion Channel - SVB Geographic Proximity",
               output = "kableExtra") %>% 
    kable_styling(bootstrap_options = c("striped", "hover"), font_size = 9)

} else {
  cat("\nInsufficient data for contagion analysis (too few banks in SVB region).\n")
}
## Saved: Table_Contagion_Channel (HTML + LaTeX)
## 
## === CONTAGION INTERPRETATION ===
## SVB Region coefficient: 0.0335 
## ✓ Banks in SVB-adjacent regions had elevated run risk beyond fundamentals.
##   This is consistent with information-based contagion spillovers.
Table: Contagion Channel - SVB Geographic Proximity

                                       (1) Base    (2) + SVB Region   (3) + SVB × MTM
MTM Loss (z)                           0.027***    0.026***           0.026***
                                       (0.005)     (0.005)            (0.005)
Uninsured Lev (z)                      0.020***    0.020***           0.020***
                                       (0.006)     (0.006)            (0.006)
MTM × Uninsured                        0.017***    0.017***           0.017***
                                       (0.004)     (0.004)            (0.004)
Log(Assets) (z)                        0.048***    0.046***           0.046***
                                       (0.006)     (0.006)            (0.006)
Cash Ratio (z)                         −0.021***   −0.022***          −0.022***
                                       (0.004)     (0.004)            (0.004)
SVB Region (CA, WA, OR, MA, NY, CO)                0.034*             0.034*
                                                   (0.019)            (0.019)
SVB Region × MTM                                                      −0.004
                                                                      (0.016)
Num.Obs.                               4282        4282               4282
R2                                     0.058       0.059              0.059

* p < 0.1, ** p < 0.05, *** p < 0.01
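# --------------------------------------------------------------------------
# Hedged robustness sketch (not part of the original run): because svb_region
# is assigned at the state level, standard errors arguably should be clustered
# by state rather than heteroskedasticity-robust. fixest supports this via the
# cluster argument; m_contagion_2_cl is an illustrative helper name.
m_contagion_2_cl <- feols(btfp_acute ~ mtm_total * uninsured_lev + svb_region + ln_assets + cash_ratio,
                          data = df_acute, cluster = ~state)
summary(m_contagion_2_cl)
# (Output omitted; illustrative only.)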

18 SOLVENT VS INSOLVENT: BORROWER VS NON-BORROWER COMPARISON

# ==============================================================================
# SOLVENT VS INSOLVENT: BORROWER VS NON-BORROWER COMPARISON
# ==============================================================================

cat("\n", paste(rep("=", 80), collapse = ""), "\n")

================================================================================

cat("SOLVENCY COMPARISON: BORROWERS VS NON-BORROWERS\n")

SOLVENCY COMPARISON: BORROWERS VS NON-BORROWERS

cat(paste(rep("=", 80), collapse = ""), "\n")

================================================================================

# ============================================================================
# STEP 1: Define Borrower Groups
# ============================================================================
# Variables already exist in df_acute from construct_analysis_vars():
#   - adjusted_equity_raw: book_equity_to_total_asset - mtm_loss_to_total_asset
#   - idcr_1_raw: (mm_asset - 0.5*uninsured - insured) / insured  [50% run]
#   - idcr_2_raw: (mm_asset - 1.0*uninsured - insured) / insured  [100% run]
#   - mtm_insolvent: adjusted_equity < 0
#   - insolvent_idcr_s50: idcr_1 < 0
#   - insolvent_idcr_s100: idcr_2 < 0
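# --------------------------------------------------------------------------
# Hedged worked example (hypothetical numbers, not from the data): IDCR for a
# bank with mark-to-market assets of 100, uninsured deposits of 40, and
# insured deposits of 50, using the definitions listed above:
#   idcr_1 = (100 - 0.5*40 - 50) / 50 = 0.6   (> 0: survives a 50% run)
#   idcr_2 = (100 - 1.0*40 - 50) / 50 = 0.2   (> 0: survives a 100% run)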

df_solvency <- df_acute %>%
  mutate(
      mtm_total_raw = mtm_loss_to_total_asset,
      mtm_btfp_raw = mtm_loss_omo_eligible_to_total_asset,
      mtm_other_raw = mtm_loss_non_omo_eligible_to_total_asset,
      uninsured_lev_raw = uninsured_deposit_to_total_asset,
      uninsured_share_raw = uninsured_to_deposit,
      mm_asset = mm_asset,
      
      ln_assets_raw = log(total_asset),
      cash_ratio_raw = cash_to_total_asset,
      securities_ratio_raw = security_to_total_asset,
      loan_ratio_raw = total_loan_to_total_asset,
      book_equity_ratio_raw = book_equity_to_total_asset,
      tier1_ratio_raw = tier1cap_to_total_asset,
      roa_raw = roa,
      fhlb_ratio_raw = fhlb_to_total_asset,
      loan_to_deposit_raw = loan_to_deposit,
      wholesale_raw = safe_div(
        fed_fund_purchase + repo + replace_na(other_borrowed_less_than_1yr, 0),
        total_liability, 0
      ) * 100,
      
     # Deposit outflows 
    uninsured_outflow_raw = change_uninsured_fwd_q,
    insured_outflow_raw = change_insured_deposit_fwd_q,
    total_outflow_raw = change_total_deposit_fwd_q,
         
      
      
      # Jiang et al. insolvency measures
      adjusted_equity_raw = book_equity_to_total_asset - mtm_loss_to_total_asset,
      mv_adjustment_raw = if_else(mm_asset == 0 | is.na(mm_asset), NA_real_, (total_asset / mm_asset) - 1),
      idcr_1_raw = safe_div(mm_asset - 0.5 * uninsured_deposit - insured_deposit, insured_deposit),
      idcr_2_raw = safe_div(mm_asset - 1.0 * uninsured_deposit - insured_deposit, insured_deposit),
      insolvency_1_raw = safe_div((total_asset - total_liability) - 0.5 * uninsured_deposit * mv_adjustment_raw, total_asset),
      insolvency_2_raw = safe_div((total_asset - total_liability) - 1.0 * uninsured_deposit * mv_adjustment_raw, total_asset),
      
      # ========================================================================
      # WINSORIZED VARIABLES (for outlier control)
      # ========================================================================
      mtm_total_w = winsorize(mtm_total_raw),
      mtm_btfp_w = winsorize(mtm_btfp_raw),
      mtm_other_w = winsorize(mtm_other_raw),
      uninsured_lev_w = winsorize(uninsured_lev_raw),
      uninsured_share_w = winsorize(uninsured_share_raw),
      
      ln_assets_w = winsorize(ln_assets_raw),
      cash_ratio_w = winsorize(cash_ratio_raw),
      securities_ratio_w = winsorize(securities_ratio_raw),
      loan_ratio_w = winsorize(loan_ratio_raw),
      book_equity_ratio_w = winsorize(book_equity_ratio_raw),
      tier1_ratio_w = winsorize(tier1_ratio_raw),
      roa_w = winsorize(roa_raw),
      fhlb_ratio_w = winsorize(fhlb_ratio_raw),
      loan_to_deposit_w = winsorize(loan_to_deposit_raw),
      wholesale_w = winsorize(wholesale_raw),
      
      # ========================================================================
      # Z-SCORE STANDARDIZED VARIABLES (for economic interpretation)
      # ========================================================================
      mtm_total = standardize_z(mtm_total_w),
      mtm_btfp = standardize_z(mtm_btfp_w),
      mtm_other = standardize_z(mtm_other_w),
      uninsured_lev = standardize_z(uninsured_lev_w),
      uninsured_share = standardize_z(uninsured_share_w),
      
      # Interaction term (standardized)
      mtm_x_uninsured = mtm_total * uninsured_lev,
      
      # Controls (standardized)
      ln_assets = standardize_z(ln_assets_w),
      cash_ratio = standardize_z(cash_ratio_w),
      securities_ratio = standardize_z(securities_ratio_w),
      loan_ratio = standardize_z(loan_ratio_w),
      book_equity_ratio = standardize_z(book_equity_ratio_w),
      tier1_ratio = standardize_z(tier1_ratio_w),
      roa = standardize_z(roa_w),
      fhlb_ratio = standardize_z(fhlb_ratio_w),
      loan_to_deposit = standardize_z(loan_to_deposit_w),
      wholesale = standardize_z(wholesale_w),
    
      # Deposit outflows 
      uninsured_outflow = standardize_z(winsorize(uninsured_outflow_raw)),
      insured_outflow = standardize_z(winsorize(insured_outflow_raw)),
      total_outflow = standardize_z(winsorize(total_outflow_raw)),        
    
      
      # Jiang et al. measures (winsorized)
      adjusted_equity = winsorize(adjusted_equity_raw),
      mv_adjustment = winsorize(mv_adjustment_raw),
      idcr_1 = winsorize(idcr_1_raw),
      idcr_2 = winsorize(idcr_2_raw),
      insolvency_1 = winsorize(insolvency_1_raw),
      insolvency_2 = winsorize(insolvency_2_raw),
      
      # Insolvency indicators
      mtm_insolvent = as.integer(adjusted_equity < 0),
      insolvent_idcr_s50 = as.integer(idcr_1 < 0),
      insolvent_idcr_s100 = as.integer(idcr_2 < 0),
      insolvent_cap_s50 = as.integer(insolvency_1 < 0),
      insolvent_cap_s100 = as.integer(insolvency_2 < 0),
    
    any_borrower = as.integer(btfp_acute == 1 | dw_acute == 1),
    btfp_only = as.integer(btfp_acute == 1 & dw_acute == 0),
    dw_only = as.integer(btfp_acute == 0 & dw_acute == 1),
    both_facilities = as.integer(btfp_acute == 1 & dw_acute == 1)
  )

# ============================================================================
# STEP 2: Summary Statistics by Borrower Status
# ============================================================================

cat("\n=== SUMMARY STATISTICS BY BORROWER STATUS ===\n")

=== SUMMARY STATISTICS BY BORROWER STATUS ===

summary_by_borrower <- df_solvency %>%
  group_by(any_borrower) %>%
  summarise(
    N = n(),
    
    # MTM-Adjusted Equity (raw, not winsorized for summary stats)
    mean_adj_equity = mean(adjusted_equity_raw, na.rm = TRUE),
    sd_adj_equity = sd(adjusted_equity_raw, na.rm = TRUE),
    pct_mtm_insolvent = mean(mtm_insolvent, na.rm = TRUE) * 100,
    
    # IDCR measures
    mean_idcr_50 = mean(idcr_1_raw, na.rm = TRUE),
    mean_idcr_100 = mean(idcr_2_raw, na.rm = TRUE),
    pct_insolvent_idcr_50 = mean(insolvent_idcr_s50, na.rm = TRUE) * 100,
    pct_insolvent_idcr_100 = mean(insolvent_idcr_s100, na.rm = TRUE) * 100,
    
    # Key characteristics
    mean_mtm_loss = mean(mtm_total_raw, na.rm = TRUE),
    mean_uninsured = mean(uninsured_lev_raw, na.rm = TRUE),
    mean_assets_mil = mean(total_asset, na.rm = TRUE) / 1e6,
    
    .groups = "drop"
  ) %>%
  mutate(Group = if_else(any_borrower == 1, "Borrowers", "Non-Borrowers")) %>%
  select(Group, everything(), -any_borrower)

print(summary_by_borrower)
## # A tibble: 2 × 12
##   Group           N mean_adj_equity sd_adj_equity pct_mtm_insolvent mean_idcr_50
##   <chr>       <int>           <dbl>         <dbl>             <dbl>        <dbl>
## 1 Non-Borrow…  3531            5.13         10.1               18.1        2.01 
## 2 Borrowers     761            2.55          4.20              24.8        0.361
## # ℹ 6 more variables: mean_idcr_100 <dbl>, pct_insolvent_idcr_50 <dbl>,
## #   pct_insolvent_idcr_100 <dbl>, mean_mtm_loss <dbl>, mean_uninsured <dbl>,
## #   mean_assets_mil <dbl>

# ============================================================================
# STEP 3: Statistical Tests
# ============================================================================

cat("\n=== STATISTICAL TESTS: BORROWERS VS NON-BORROWERS ===\n")

=== STATISTICAL TESTS: BORROWERS VS NON-BORROWERS ===

borrowers <- df_solvency %>% filter(any_borrower == 1)
non_borrowers <- df_solvency %>% filter(any_borrower == 0)

# T-test for MTM-Adjusted Equity
t_adj_eq <- t.test(borrowers$adjusted_equity_raw, non_borrowers$adjusted_equity_raw)
cat("\nMTM-Adjusted Equity:\n")

MTM-Adjusted Equity:

cat("  Borrowers mean:", round(mean(borrowers$adjusted_equity_raw, na.rm = TRUE), 3), "%\n")

Borrowers mean: 2.549 %

cat("  Non-borrowers mean:", round(mean(non_borrowers$adjusted_equity_raw, na.rm = TRUE), 3), "%\n")

Non-borrowers mean: 5.13 %

cat("  Difference:", round(t_adj_eq$estimate[1] - t_adj_eq$estimate[2], 3), "%\n")

Difference: -2.581 %

cat("  t-stat:", round(t_adj_eq$statistic, 2), ", p-value:", format.pval(t_adj_eq$p.value, digits = 4), "\n")

t-stat: -11.29 , p-value: < 0.00000000000000022

# T-test for IDCR (50% run)
t_idcr_50 <- t.test(borrowers$idcr_1_raw, non_borrowers$idcr_1_raw)
cat("\nIDCR (50% run):\n")

IDCR (50% run):

cat("  Borrowers mean:", round(mean(borrowers$idcr_1_raw, na.rm = TRUE), 3), "\n")

Borrowers mean: 0.361

cat("  Non-borrowers mean:", round(mean(non_borrowers$idcr_1_raw, na.rm = TRUE), 3), "\n")

Non-borrowers mean: 2.011

cat("  t-stat:", round(t_idcr_50$statistic, 2), ", p-value:", format.pval(t_idcr_50$p.value, digits = 4), "\n")

t-stat: -2.36 , p-value: 0.01811

# T-test for IDCR (100% run)
t_idcr_100 <- t.test(borrowers$idcr_2_raw, non_borrowers$idcr_2_raw)
cat("\nIDCR (100% run):\n")

IDCR (100% run):

cat("  Borrowers mean:", round(mean(borrowers$idcr_2_raw, na.rm = TRUE), 3), "\n")

Borrowers mean: 0.083

cat("  Non-borrowers mean:", round(mean(non_borrowers$idcr_2_raw, na.rm = TRUE), 3), "\n")

Non-borrowers mean: 1.6

cat("  t-stat:", round(t_idcr_100$statistic, 2), ", p-value:", format.pval(t_idcr_100$p.value, digits = 4), "\n")

t-stat: -2.24 , p-value: 0.02539

# Chi-square tests for insolvency rates
cat("\nInsolvency Rate Comparison (Chi-square tests):\n")

Insolvency Rate Comparison (Chi-square tests):

chisq_mtm <- chisq.test(table(df_solvency$any_borrower, df_solvency$mtm_insolvent))
cat("  MTM Insolvency: χ² =", round(chisq_mtm$statistic, 2), 
    ", p =", format.pval(chisq_mtm$p.value, digits = 4), "\n")

MTM Insolvency: χ² = 18.02 , p = 0.00002186

chisq_idcr50 <- chisq.test(table(df_solvency$any_borrower, df_solvency$insolvent_idcr_s50))
cat("  IDCR<0 (50% run): χ² =", round(chisq_idcr50$statistic, 2), 
    ", p =", format.pval(chisq_idcr50$p.value, digits = 4), "\n")

IDCR<0 (50% run): χ² = 0.79 , p = 0.3747

chisq_idcr100 <- chisq.test(table(df_solvency$any_borrower, df_solvency$insolvent_idcr_s100))
cat("  IDCR<0 (100% run): χ² =", round(chisq_idcr100$statistic, 2), 
    ", p =", format.pval(chisq_idcr100$p.value, digits = 4), "\n")

IDCR<0 (100% run): χ² = 1.11 , p = 0.2918
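# --------------------------------------------------------------------------
# Hedged sketch (not part of the original run): a size-adjusted version of the
# borrower / non-borrower solvency gap, since borrowers skew larger. Variable
# names are those defined in df_solvency above; m_adj_eq_gap is an
# illustrative helper name.
m_adj_eq_gap <- feols(adjusted_equity_raw ~ any_borrower + ln_assets,
                      data = df_solvency, vcov = "hetero")
summary(m_adj_eq_gap)
# (Output omitted; illustrative only.)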

# ============================================================================
# STEP 4: Panel A - Borrowers vs Non-Borrowers
# ============================================================================

# Helper for significance stars
get_stars <- function(p) {
  case_when(p < 0.01 ~ "***", p < 0.05 ~ "**", p < 0.10 ~ "*", TRUE ~ "")
}

panel_a <- df_solvency %>%
  group_by(Borrower_Status = if_else(any_borrower == 1, "Any Fed Borrower", "Non-Borrower")) %>%
  summarise(
    N = as.character(n()),
    `Adj. Equity (%)` = sprintf("%.2f (%.2f)", 
                                 mean(adjusted_equity_raw, na.rm = TRUE),
                                 sd(adjusted_equity_raw, na.rm = TRUE)),
    `% MTM Insolvent` = sprintf("%.1f%%", mean(mtm_insolvent, na.rm = TRUE) * 100),
    `% IDCR<0 (50%)` = sprintf("%.1f%%", mean(insolvent_idcr_s50, na.rm = TRUE) * 100),
    `% IDCR<0 (100%)` = sprintf("%.1f%%", mean(insolvent_idcr_s100, na.rm = TRUE) * 100),
    .groups = "drop"
  )

# Add difference row with significance
diff_row <- tibble(
  Borrower_Status = "Difference",
  N = "",
  `Adj. Equity (%)` = sprintf("%.2f%s", 
    mean(borrowers$adjusted_equity_raw, na.rm = TRUE) - 
    mean(non_borrowers$adjusted_equity_raw, na.rm = TRUE),
    get_stars(t_adj_eq$p.value)),
  `% MTM Insolvent` = sprintf("%.1f pp%s", 
    (mean(borrowers$mtm_insolvent, na.rm = TRUE) - 
     mean(non_borrowers$mtm_insolvent, na.rm = TRUE)) * 100,
    get_stars(chisq_mtm$p.value)),
  `% IDCR<0 (50%%)` = sprintf("%.1f pp%s", 
    (mean(borrowers$insolvent_idcr_s50, na.rm = TRUE) - 
     mean(non_borrowers$insolvent_idcr_s50, na.rm = TRUE)) * 100,
    get_stars(chisq_idcr50$p.value)),
  `% IDCR<0 (100%%)` = sprintf("%.1f pp%s", 
    (mean(borrowers$insolvent_idcr_s100, na.rm = TRUE) - 
     mean(non_borrowers$insolvent_idcr_s100, na.rm = TRUE)) * 100,
    get_stars(chisq_idcr100$p.value))
)

panel_a_full <- bind_rows(panel_a, diff_row)

panel_a_full %>%
  kable(format = "html",
        caption = "Panel A: Solvency Comparison — Borrowers vs Non-Borrowers",
        align = c("l", "r", "r", "r", "r", "r")) %>%
  kable_styling(bootstrap_options = c("striped", "hover"), font_size = 10) %>%
  row_spec(3, bold = TRUE, background = "#f0f0f0")
Panel A: Solvency Comparison — Borrowers vs Non-Borrowers

Borrower_Status      N      Adj. Equity (%)   % MTM Insolvent   % IDCR<0 (50%)   % IDCR<0 (100%)
Any Fed Borrower     761    2.55 (4.20)       24.8%             4.9%             30.1%
Non-Borrower         3531   5.13 (10.13)      18.1%             4.1%             28.1%
Difference                  -2.58***          6.8 pp***         0.8 pp           2.0 pp
# ============================================================================
# STEP 5: Panel B - By Facility Type
# ============================================================================

panel_b <- df_solvency %>%
  mutate(
    Facility = case_when(
      btfp_only == 1 ~ "BTFP Only",
      dw_only == 1 ~ "DW Only",
      both_facilities == 1 ~ "Both",
      TRUE ~ "Non-Borrower"
    )
  ) %>%
  group_by(Facility) %>%
  summarise(
    N = n(),
    `Adj. Equity (%)` = round(mean(adjusted_equity_raw, na.rm = TRUE), 2),
    `% Solvent (MTM)` = round((1 - mean(mtm_insolvent, na.rm = TRUE)) * 100, 1),
    `% Solvent (50%)` = round((1 - mean(insolvent_idcr_s50, na.rm = TRUE)) * 100, 1),
    `% Solvent (100%)` = round((1 - mean(insolvent_idcr_s100, na.rm = TRUE)) * 100, 1),
    `MTM Loss (%)` = round(mean(mtm_total_raw, na.rm = TRUE), 2),
    `Uninsured (%)` = round(mean(uninsured_lev_raw, na.rm = TRUE), 2),
    .groups = "drop"
  ) %>%
  arrange(factor(Facility, levels = c("BTFP Only", "DW Only", "Both", "Non-Borrower")))

panel_b %>%
  kable(format = "html",
        caption = "Panel B: Solvency by Facility Type",
        align = c("l", rep("r", 7))) %>%
  kable_styling(bootstrap_options = c("striped", "hover"), font_size = 10)
Panel B: Solvency by Facility Type

Facility       N      Adj. Equity (%)   % Solvent (MTM)   % Solvent (50%)   % Solvent (100%)   MTM Loss (%)   Uninsured (%)
BTFP Only      368    2.00              69.6              96.5              70.9               6.18           26.21
DW Only        299    3.30              80.9              93.0              66.9               5.75           26.85
Both           94     2.33              78.7              96.8              75.5               5.86           32.63
Non-Borrower   3531   5.13              81.9              95.9              71.9               5.36           22.83
# ============================================================================
# STEP 6: Generate LaTeX Table
# ============================================================================

latex_table <- paste0(
"\\begin{table}[htbp]
\\centering
\\caption{Solvency Comparison: Emergency Facility Borrowers vs. Non-Borrowers}
\\label{tab:solvency_comparison}

\\vspace{0.5em}
\\begin{threeparttable}
\\small

\\textbf{Panel A: Borrowers vs. Non-Borrowers}
\\vspace{0.3em}

\\begin{tabular}{@{}lrrrrr@{}}
\\toprule
Group & N & Adj. Equity (\\%) & \\% MTM Insolvent & \\% IDCR$<$0 (50\\%) & \\% IDCR$<$0 (100\\%) \\\\
\\midrule
Any Fed Borrower & ", nrow(borrowers), " & ", 
sprintf("%.2f (%.2f)", mean(borrowers$adjusted_equity_raw, na.rm=TRUE), sd(borrowers$adjusted_equity_raw, na.rm=TRUE)), " & ",
sprintf("%.1f", mean(borrowers$mtm_insolvent, na.rm=TRUE)*100), " & ",
sprintf("%.1f", mean(borrowers$insolvent_idcr_s50, na.rm=TRUE)*100), " & ",
sprintf("%.1f", mean(borrowers$insolvent_idcr_s100, na.rm=TRUE)*100), " \\\\
Non-Borrower & ", nrow(non_borrowers), " & ",
sprintf("%.2f (%.2f)", mean(non_borrowers$adjusted_equity_raw, na.rm=TRUE), sd(non_borrowers$adjusted_equity_raw, na.rm=TRUE)), " & ",
sprintf("%.1f", mean(non_borrowers$mtm_insolvent, na.rm=TRUE)*100), " & ",
sprintf("%.1f", mean(non_borrowers$insolvent_idcr_s50, na.rm=TRUE)*100), " & ",
sprintf("%.1f", mean(non_borrowers$insolvent_idcr_s100, na.rm=TRUE)*100), " \\\\
\\addlinespace[3pt]
\\textit{Difference} & & ",
sprintf("%.2f%s", 
  mean(borrowers$adjusted_equity_raw, na.rm=TRUE) - mean(non_borrowers$adjusted_equity_raw, na.rm=TRUE),
  get_stars(t_adj_eq$p.value)), " & ",
sprintf("%.1f pp%s", (mean(borrowers$mtm_insolvent, na.rm=TRUE) - mean(non_borrowers$mtm_insolvent, na.rm=TRUE))*100, get_stars(chisq_mtm$p.value)), " & ",
sprintf("%.1f pp%s", (mean(borrowers$insolvent_idcr_s50, na.rm=TRUE) - mean(non_borrowers$insolvent_idcr_s50, na.rm=TRUE))*100, get_stars(chisq_idcr50$p.value)), " & ",
sprintf("%.1f pp%s", (mean(borrowers$insolvent_idcr_s100, na.rm=TRUE) - mean(non_borrowers$insolvent_idcr_s100, na.rm=TRUE))*100, get_stars(chisq_idcr100$p.value)), " \\\\
\\bottomrule
\\end{tabular}

\\vspace{1em}
\\textbf{Panel B: By Facility Type}
\\vspace{0.3em}

\\begin{tabular}{@{}lrrrrrrr@{}}
\\toprule
Facility & N & Adj. Eq (\\%) & \\% Solvent (MTM) & \\% Solvent (50\\%) & \\% Solvent (100\\%) & MTM Loss & Uninsured \\\\
\\midrule")

# Add facility rows dynamically
facility_data <- df_solvency %>%
  mutate(
    Facility = case_when(
      btfp_only == 1 ~ "BTFP Only",
      dw_only == 1 ~ "DW Only",
      both_facilities == 1 ~ "Both",
      TRUE ~ "Non-Borrower"
    )
  ) %>%
  group_by(Facility) %>%
  summarise(
    N = n(),
    adj_eq = round(mean(adjusted_equity_raw, na.rm = TRUE), 2),
    solv_mtm = round((1 - mean(mtm_insolvent, na.rm = TRUE)) * 100, 1),
    solv_50 = round((1 - mean(insolvent_idcr_s50, na.rm = TRUE)) * 100, 1),
    solv_100 = round((1 - mean(insolvent_idcr_s100, na.rm = TRUE)) * 100, 1),
    mtm = round(mean(mtm_total_raw, na.rm = TRUE), 2),
    unins = round(mean(uninsured_lev_raw, na.rm = TRUE), 2),
    .groups = "drop"
  ) %>%
  arrange(factor(Facility, levels = c("BTFP Only", "DW Only", "Both", "Non-Borrower")))

for (i in 1:nrow(facility_data)) {
  latex_table <- paste0(latex_table, "\n",
    facility_data$Facility[i], " & ",
    facility_data$N[i], " & ",
    facility_data$adj_eq[i], " & ",
    facility_data$solv_mtm[i], "\\% & ",
    facility_data$solv_50[i], "\\% & ",
    facility_data$solv_100[i], "\\% & ",
    facility_data$mtm[i], " & ",
    facility_data$unins[i], " \\\\"
  )
}

latex_table <- paste0(latex_table, "
\\bottomrule
\\end{tabular}

\\begin{tablenotes}[flushleft]
\\footnotesize
\\item \\textit{Notes:} This table compares solvency characteristics of banks that borrowed from emergency facilities during the acute crisis period (March 13--May 1, 2023) versus non-borrowers. Panel A reports mean values (standard deviations) and insolvency rates. Panel B shows breakdown by facility type. MTM-Adjusted Equity = Book Equity/Assets $-$ MTM Loss/Assets. IDCR = (Market Value Assets $-$ $\\alpha$ $\\times$ Uninsured Deposits $-$ Insured Deposits) / Insured Deposits, where $\\alpha = 0.5$ (50\\% run) or $\\alpha = 1.0$ (100\\% run). IDCR $<$ 0 indicates insufficient assets to cover deposit run and repay insured depositors. Difference row shows Borrower $-$ Non-Borrower with significance from t-tests (continuous) and $\\chi^2$ tests (rates).
\\item $^{***}p<0.01$; $^{**}p<0.05$; $^{*}p<0.10$.
\\end{tablenotes}
\\end{threeparttable}
\\end{table}")

# Save LaTeX
writeLines(latex_table, file.path(TABLE_PATH, "Table_Solvency_Comparison_Full.tex"))
cat("\n✓ Saved: Table_Solvency_Comparison_Full.tex\n")

✓ Saved: Table_Solvency_Comparison_Full.tex

# ============================================================================
# STEP 7: Key Findings Summary
# ============================================================================

cat("\n", paste(rep("=", 80), collapse = ""), "\n")

================================================================================

cat("KEY FINDINGS: SOLVENCY COMPARISON\n")

KEY FINDINGS: SOLVENCY COMPARISON

cat(paste(rep("=", 80), collapse = ""), "\n")

================================================================================

cat("\n1. MTM-ADJUSTED EQUITY (Book Equity - MTM Loss):\n")
  1. MTM-ADJUSTED EQUITY (Book Equity - MTM Loss):
cat("   Borrowers:", round(mean(borrowers$adjusted_equity_raw, na.rm = TRUE), 2), "%\n")

Borrowers: 2.55 %

cat("   Non-borrowers:", round(mean(non_borrowers$adjusted_equity_raw, na.rm = TRUE), 2), "%\n")

Non-borrowers: 5.13 %

cat("\n2. INSOLVENCY RATES:\n")
  1. INSOLVENCY RATES:
cat("   Under 50% run scenario:\n")

Under 50% run scenario:

cat("     Borrowers:", round(mean(borrowers$insolvent_idcr_s50, na.rm = TRUE) * 100, 1), "% insolvent\n")
 Borrowers: 4.9 % insolvent
cat("     Non-borrowers:", round(mean(non_borrowers$insolvent_idcr_s50, na.rm = TRUE) * 100, 1), "% insolvent\n")
 Non-borrowers: 4.1 % insolvent
cat("   Under 100% run scenario:\n")

Under 100% run scenario:

cat("     Borrowers:", round(mean(borrowers$insolvent_idcr_s100, na.rm = TRUE) * 100, 1), "% insolvent\n")
 Borrowers: 30.1 % insolvent
cat("     Non-borrowers:", round(mean(non_borrowers$insolvent_idcr_s100, na.rm = TRUE) * 100, 1), "% insolvent\n")
 Non-borrowers: 28.1 % insolvent
cat("\n3. INTERPRETATION:\n")
  1. INTERPRETATION:
solv_rate_50 <- (1 - mean(borrowers$insolvent_idcr_s50, na.rm = TRUE)) * 100
if (solv_rate_50 > 90) {
  cat("   ✓ Under 50% run scenario,", round(solv_rate_50, 1), "% of borrowers were SOLVENT.\n")
  cat("   ✓ Emergency facilities addressed LIQUIDITY crises, not insolvency.\n")
}

✓ Under 50% run scenario, 95.1 % of borrowers were SOLVENT. ✓ Emergency facilities addressed LIQUIDITY crises, not insolvency.

19 SESSION INFO

sessionInfo()
## R version 4.3.1 (2023-06-16 ucrt)
## Platform: x86_64-w64-mingw32/x64 (64-bit)
## Running under: Windows 11 x64 (build 26100)
## 
## Matrix products: default
## 
## 
## locale:
## [1] LC_COLLATE=English_United States.utf8 
## [2] LC_CTYPE=English_United States.utf8   
## [3] LC_MONETARY=English_United States.utf8
## [4] LC_NUMERIC=C                          
## [5] LC_TIME=English_United States.utf8    
## 
## time zone: America/Chicago
## tzcode source: internal
## 
## attached base packages:
## [1] stats     graphics  grDevices utils     datasets  methods   base     
## 
## other attached packages:
##  [1] marginaleffects_0.25.1 car_3.1-2              carData_3.0-5         
##  [4] haven_2.5.4            broom_1.0.8            patchwork_1.3.0       
##  [7] scales_1.3.0           kableExtra_1.4.0       modelsummary_2.3.0    
## [10] fixest_0.12.1          data.table_1.17.0      lubridate_1.9.4       
## [13] forcats_1.0.0          stringr_1.5.1          dplyr_1.1.4           
## [16] purrr_1.0.2            readr_2.1.5            tidyr_1.3.1           
## [19] tibble_3.2.1           ggplot2_3.5.1          tidyverse_2.0.0       
## 
## loaded via a namespace (and not attached):
##  [1] tidyselect_1.2.1    viridisLite_0.4.2   farver_2.1.1       
##  [4] fastmap_1.1.1       bayestestR_0.17.0   digest_0.6.33      
##  [7] timechange_0.3.0    lifecycle_1.0.4     dreamerr_1.4.0     
## [10] magrittr_2.0.3      compiler_4.3.1      rlang_1.1.6        
## [13] sass_0.4.9          tools_4.3.1         utf8_1.2.4         
## [16] yaml_2.3.7          knitr_1.50          labeling_0.4.3     
## [19] bit_4.0.5           xml2_1.3.6          abind_1.4-8        
## [22] tinytable_0.15.1    withr_2.5.2         numDeriv_2016.8-1.1
## [25] grid_4.3.1          datawizard_1.3.0    fansi_1.0.5        
## [28] colorspace_2.1-0    insight_1.4.3       cli_3.6.5          
## [31] rmarkdown_2.29      crayon_1.5.3        ragg_1.3.0         
## [34] generics_0.1.3      rstudioapi_0.16.0   performance_0.15.3 
## [37] tzdb_0.4.0          parameters_0.28.3   cachem_1.0.8       
## [40] parallel_4.3.1      stringmagic_1.1.2   vctrs_0.6.4        
## [43] sandwich_3.1-1      jsonlite_1.8.7      litedown_0.7       
## [46] hms_1.1.3           bit64_4.0.5         Formula_1.2-5      
## [49] systemfonts_1.0.6   jquerylib_0.1.4     glue_1.8.0         
## [52] stringi_1.7.12      gtable_0.3.6        tables_0.9.31      
## [55] munsell_0.5.0       pillar_1.9.0        htmltools_0.5.9    
## [58] R6_2.5.1            textshaping_0.3.7   vroom_1.6.5        
## [61] evaluate_0.23       lattice_0.21-8      backports_1.4.1    
## [64] bslib_0.5.1         Rcpp_1.0.12         svglite_2.1.3      
## [67] nlme_3.1-162        checkmate_2.3.1     xfun_0.54          
## [70] zoo_1.8-12          pkgconfig_2.0.3