Load packages

library(lavaan)
## This is lavaan 0.6-18
## lavaan is FREE software! Please report any bugs.
library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr     1.1.4     ✔ readr     2.1.5
## ✔ forcats   1.0.0     ✔ stringr   1.5.1
## ✔ ggplot2   3.5.1     ✔ tibble    3.2.1
## ✔ lubridate 1.9.4     ✔ tidyr     1.3.1
## ✔ purrr     1.0.4
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag()    masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors

Load data

data <- read.csv("~/Google drive/My Drive/YEAR 2/PROJECTS/DEREK/Status Distribution/Studies/Study 2: Status Affordance/raw_study2_pilotdata_8.18.25.csv")

Exclusions

data <- data %>% 
  slice(-c(1:2)) %>% 
  filter(attn_bots != "14285733") %>% 
  filter(attn == 24)  %>%
   unite(geolocation, LocationLatitude, LocationLongitude) %>%
   group_by(geolocation) %>%
   mutate(geo_frequency = n()) %>%
   filter(geo_frequency < 3) %>%
   ungroup()
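
A quick check of how many responses survive the exclusions can help track attrition (a minimal sketch, not part of the original pipeline):

# Responses remaining after the bot, attention, and duplicate-geolocation filters
nrow(data)
n_distinct(data$ResponseId)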

Data cleaning

Gather and clean numeric data

df_numeric <- data %>%
  select(c(ResponseId, dist_1R:attr_10, affordance1_1:affordance1_3, affordance2_1:affordance2_3, affordance3_1:affordance3_3, SZSB_1:SZSB_8R, sdo_1:sdo_8R, need.for.status_1:need.for.status_8, dom_1:prest_9R, fv_inclusivity)) %>% 
  mutate(across(-ResponseId, as.numeric))
df_numeric <- df_numeric %>%
  mutate(across(ends_with("R"), ~ 8 - ., .names = "{.col}_Recoded")) 
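
The 8 - x recode assumes every reverse-scored item is on a 1-7 scale; a quick range check (a sketch, not in the original script) can confirm that before trusting the *_Recoded columns:

# Verify the reverse-scored items fall in the expected 1-7 range
df_numeric %>%
  select(ends_with("R")) %>%
  summarise(across(everything(), list(min = ~ min(., na.rm = TRUE),
                                      max = ~ max(., na.rm = TRUE))))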

Create average scores

df_numeric <- df_numeric %>%
  mutate(
    status_expansion_belief = rowMeans(select(., 
      dist_1R_Recoded,
      dist_2R_Recoded, 
      dist_5R_Recoded, 
      dist_14,
      dist_15,
      dist_16,
      attr_3, 
      attr_4, 
      attr_5, 
      attr_6, 
      attr_8, 
      attr_10), 
      na.rm = TRUE)
  )
df_numeric <- df_numeric %>%
  mutate(
    dist_avg = rowMeans(select(., 
      dist_1R_Recoded,
      dist_2R_Recoded, 
      dist_5R_Recoded, 
      dist_14,
      dist_15,
      dist_16), 
      na.rm = TRUE)
  )
df_numeric <- df_numeric %>%
  mutate(
    attribute_avg = rowMeans(select(., 
      attr_3, 
      attr_4, 
      attr_5, 
      attr_6, 
      attr_8, 
      attr_10
    ), na.rm = TRUE)
  )
df_numeric <- df_numeric %>%
  mutate(
    descriptive_avg = rowMeans(select(., 
      dist_1R_Recoded,
      dist_2R_Recoded, 
      dist_5R_Recoded, 
      attr_3, 
      attr_4, 
      attr_5), 
      na.rm = TRUE)
  )
df_numeric <- df_numeric %>%
  mutate(
    prescriptive_avg = rowMeans(select(., 
      dist_14,
      dist_15,
      dist_16, 
      attr_6, 
      attr_8, 
      attr_10), 
      na.rm = TRUE)
  )
df_numeric <- df_numeric %>%
  mutate(
    szsb_avg = rowMeans(select(., 
      SZSB_1, 
      SZSB_2, 
      SZSB_3, 
      SZSB_4, 
      SZSB_5R_Recoded, 
      SZSB_6R_Recoded, 
      SZSB_7R_Recoded, 
      SZSB_8R_Recoded
    ), na.rm = TRUE)
  )
df_numeric <- df_numeric %>%
  mutate(
    nfs_avg = rowMeans(select(., 
      need.for.status_1, 
      need.for.status_2R_Recoded, 
      need.for.status_3, 
      need.for.status_4, 
      need.for.status_5, 
      need.for.status_6, 
      need.for.status_7R_Recoded, 
      need.for.status_8
    ), na.rm = TRUE)
  )
df_numeric <- df_numeric %>%
  mutate(
    sdo_avg = rowMeans(select(., 
      sdo_1, 
      sdo_2,
      sdo_3R_Recoded,
      sdo_4R_Recoded, 
      sdo_5,
      sdo_6,
      sdo_7R_Recoded,
      sdo_8R_Recoded
    ), na.rm = TRUE)
  )
df_numeric <- df_numeric %>%
  mutate(
    dominance_avg = rowMeans(select(., 
      dom_1, 
      dom_2, 
      dom_3, 
      dom_4, 
      dom_5R_Recoded, 
      dom_6, 
      dom_7R_Recoded, 
      dom_8
    ), na.rm = TRUE)
  )
df_numeric <- df_numeric %>%
  mutate(
    prestige_avg = rowMeans(select(., 
      prest_1, 
      prest_2R_Recoded, 
      prest_3, 
      prest_4R_Recoded, 
      prest_5, 
      prest_6, 
      prest_7, 
      prest_8, 
      prest_9R_Recoded
    ), na.rm = TRUE)
  )
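
Since every composite follows the same rowMeans pattern, a small helper could cut the repetition. A sketch of one way to do it (scale_mean and dist_avg_check are hypothetical and not used elsewhere in this script):

# Hypothetical helper: average a set of items, ignoring missing values
scale_mean <- function(df, ...) {
  rowMeans(dplyr::select(df, ...), na.rm = TRUE)
}

# Example: should reproduce dist_avg
df_numeric <- df_numeric %>%
  mutate(dist_avg_check = scale_mean(., dist_1R_Recoded, dist_2R_Recoded, dist_5R_Recoded,
                                     dist_14, dist_15, dist_16))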

Quick alpha check

items <- df_numeric %>% 
  select(dist_1R_Recoded,
      dist_2R_Recoded, 
      dist_5R_Recoded, 
      dist_14, 
      dist_15, 
      dist_16,
      attr_3, 
      attr_4, 
      attr_5, 
      attr_6, 
      attr_8, 
      attr_10) %>% 
  na.omit()

psych::alpha(items) # 0.81
## 
## Reliability analysis   
## Call: psych::alpha(x = items)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean   sd median_r
##       0.81      0.84     0.9       0.3 5.1 0.041  5.2 0.66     0.29
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.73  0.81  0.88
## Duhachek  0.73  0.81  0.89
## 
##  Reliability if an item is dropped:
##                 raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r med.r
## dist_1R_Recoded      0.82      0.84    0.90      0.33 5.4    0.039 0.055  0.37
## dist_2R_Recoded      0.82      0.84    0.89      0.33 5.4    0.039 0.053  0.37
## dist_5R_Recoded      0.81      0.84    0.90      0.32 5.2    0.041 0.057  0.37
## dist_14              0.79      0.81    0.88      0.28 4.3    0.045 0.050  0.26
## dist_15              0.79      0.82    0.89      0.30 4.6    0.046 0.068  0.30
## dist_16              0.78      0.81    0.88      0.28 4.2    0.049 0.065  0.26
## attr_3               0.79      0.81    0.88      0.28 4.3    0.047 0.059  0.26
## attr_4               0.78      0.81    0.88      0.28 4.2    0.047 0.061  0.26
## attr_5               0.81      0.83    0.89      0.31 4.9    0.043 0.063  0.36
## attr_6               0.79      0.81    0.88      0.28 4.4    0.045 0.052  0.26
## attr_8               0.79      0.82    0.88      0.29 4.5    0.045 0.058  0.26
## attr_10              0.80      0.83    0.89      0.30 4.7    0.043 0.058  0.29
## 
##  Item statistics 
##                  n raw.r std.r r.cor r.drop mean   sd
## dist_1R_Recoded 50  0.44  0.34  0.29   0.27  3.7 1.46
## dist_2R_Recoded 50  0.47  0.37  0.33   0.30  4.1 1.53
## dist_5R_Recoded 50  0.49  0.40  0.35   0.34  4.0 1.41
## dist_14         50  0.65  0.72  0.72   0.57  5.7 0.96
## dist_15         50  0.62  0.61  0.55   0.52  4.9 1.17
## dist_16         50  0.74  0.75  0.72   0.67  5.2 1.11
## attr_3          50  0.69  0.74  0.72   0.62  5.8 0.98
## attr_4          50  0.72  0.76  0.75   0.65  5.7 1.00
## attr_5          50  0.48  0.53  0.48   0.38  5.6 0.92
## attr_6          50  0.64  0.70  0.70   0.55  5.8 1.06
## attr_8          50  0.62  0.67  0.65   0.53  5.8 1.02
## attr_10         50  0.52  0.58  0.54   0.42  5.7 0.97
## 
## Non missing response frequency for each item
##                    1    2    3    4    5    6    7 miss
## dist_1R_Recoded 0.04 0.16 0.34 0.10 0.24 0.10 0.02    0
## dist_2R_Recoded 0.04 0.16 0.16 0.14 0.32 0.16 0.02    0
## dist_5R_Recoded 0.02 0.16 0.20 0.22 0.26 0.12 0.02    0
## dist_14         0.00 0.02 0.00 0.06 0.28 0.48 0.16    0
## dist_15         0.00 0.04 0.10 0.16 0.38 0.28 0.04    0
## dist_16         0.00 0.02 0.06 0.14 0.32 0.38 0.08    0
## attr_3          0.00 0.02 0.00 0.06 0.24 0.48 0.20    0
## attr_4          0.00 0.00 0.04 0.04 0.34 0.36 0.22    0
## attr_5          0.00 0.00 0.04 0.04 0.32 0.46 0.14    0
## attr_6          0.00 0.02 0.02 0.02 0.32 0.36 0.26    0
## attr_8          0.00 0.02 0.00 0.06 0.22 0.44 0.26    0
## attr_10         0.00 0.00 0.04 0.06 0.22 0.50 0.18    0
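
The same reliability check can be run on the distribution-item and attribute-item subscales defined above (a quick sketch; output not shown):

# Alpha for the two subscales separately
psych::alpha(select(items, dist_1R_Recoded, dist_2R_Recoded, dist_5R_Recoded,
                    dist_14, dist_15, dist_16))
psych::alpha(select(items, attr_3, attr_4, attr_5, attr_6, attr_8, attr_10))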

Status Affordance Analysis

Remove managers who did not nominate at least three coworkers

df_manager <- data %>% 
  select(ResponseId, managerTenure:industry, selected1:selected3) %>% 
  filter(
    !is.na(selected1) & !(tolower(selected1) %in% c("n/a", "na")),
    !is.na(selected2) & !(tolower(selected2) %in% c("n/a", "na")),
    !is.na(selected3) & !(tolower(selected3) %in% c("n/a", "na"))
  )

df_full <- df_manager %>% 
  left_join(df_numeric, by = "ResponseId")

Create averages

df_full <- df_full %>%
  mutate(
    selected1_status_avg = rowMeans(select(., 
      affordance1_1,
      affordance1_2,
      affordance1_3), 
      na.rm = TRUE)
  )

df_full <- df_full %>%
  mutate(
    selected2_status_avg = rowMeans(select(., 
      affordance2_1,
      affordance2_2,
      affordance2_3), 
      na.rm = TRUE)
  )

df_full <- df_full %>%
  mutate(
    selected3_status_avg = rowMeans(select(., 
      affordance3_1,
      affordance3_2,
      affordance3_3), 
      na.rm = TRUE)
  )
df_long <- df_full %>%
  select(ResponseId, status_expansion_belief, selected1, selected2, selected3,
         selected1_status_avg, selected2_status_avg, selected3_status_avg) %>%
  pivot_longer(
    cols = c(selected1_status_avg, selected2_status_avg, selected3_status_avg),
    names_to = "slot",
    values_to = "affordance_score"
  ) %>%
  # extract which slot (1/2/3) we’re on
  mutate(slot_num = str_extract(slot, "\\d")) %>%
  # map the slot back to the corresponding direct report ID
  mutate(report_id = case_when(
    slot_num == "1" ~ as.character(selected1),
    slot_num == "2" ~ as.character(selected2),
    slot_num == "3" ~ as.character(selected3)
  )) %>%
  # final analysis columns
  select(ResponseId, report_id, affordance_score, status_expansion_belief)
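
Each manager should contribute exactly three rows after the pivot; a quick structural check (a sketch, not in the original):

# Confirm the reshape produced one row per manager x nominated coworker
df_long %>%
  count(ResponseId) %>%
  count(n, name = "n_managers")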

Simple MLM

library(lme4)
## Loading required package: Matrix
## 
## Attaching package: 'Matrix'
## The following objects are masked from 'package:tidyr':
## 
##     expand, pack, unpack
library(lmerTest)
## 
## Attaching package: 'lmerTest'
## The following object is masked from 'package:lme4':
## 
##     lmer
## The following object is masked from 'package:stats':
## 
##     step
# Grand-mean center predictor for interpretability
df_long <- df_long %>%
  mutate(status_expansion_c = scale(status_expansion_belief, center = TRUE, scale = FALSE)[,1])

# Model: does status expansion predict affordance?
m1 <- lmer(affordance_score ~ status_expansion_c + (1 | ResponseId), data = df_long, REML = TRUE)
summary(m1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: affordance_score ~ status_expansion_c + (1 | ResponseId)
##    Data: df_long
## 
## REML criterion at convergence: 377.2
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -3.12464 -0.41859  0.05887  0.59894  1.98412 
## 
## Random effects:
##  Groups     Name        Variance Std.Dev.
##  ResponseId (Intercept) 0.2145   0.4631  
##  Residual               0.7134   0.8446  
## Number of obs: 138, groups:  ResponseId, 46
## 
## Fixed effects:
##                    Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)         5.31884    0.09916 44.00000  53.641  < 2e-16 ***
## status_expansion_c  0.69916    0.15911 44.00000   4.394 6.92e-05 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## stts_xpnsn_ 0.000

For each 1-unit increase in status expansion beliefs, managers afford about 0.70 more points of status to their direct reports. The effect is statistically significant (t = 4.39, p < .001).
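
The random-intercept variance also indicates how much of the affordance variance sits between managers; a sketch of the intraclass correlation from the estimates above (roughly 0.21 / (0.21 + 0.71) ≈ .23):

# ICC: share of variance attributable to the manager-level intercept
vc <- as.data.frame(VarCorr(m1))
vc$vcov[vc$grp == "ResponseId"] / sum(vc$vcov)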

With controls

df_long_controls <- df_full %>%
  select(ResponseId, status_expansion_belief, selected1, selected2, selected3,
         selected1_status_avg, selected2_status_avg, selected3_status_avg, szsb_avg, nfs_avg, sdo_avg, dominance_avg, prestige_avg) %>%
  pivot_longer(
    cols = c(selected1_status_avg, selected2_status_avg, selected3_status_avg),
    names_to = "slot",
    values_to = "affordance_score"
  ) %>%
  # extract which slot (1/2/3) we’re on
  mutate(slot_num = str_extract(slot, "\\d")) %>%
  # map the slot back to the corresponding direct report ID
  mutate(report_id = case_when(
    slot_num == "1" ~ as.character(selected1),
    slot_num == "2" ~ as.character(selected2),
    slot_num == "3" ~ as.character(selected3)
  )) %>%
  # final analysis columns
  select(ResponseId, report_id, affordance_score, status_expansion_belief, szsb_avg, nfs_avg, sdo_avg, dominance_avg, prestige_avg)
# Grand-mean center predictor for interpretability
df_long_controls <- df_long_controls %>%
  mutate(status_expansion_c = scale(status_expansion_belief, center = TRUE, scale = FALSE)[,1])
m2 <- lmer(affordance_score ~ status_expansion_c + szsb_avg + nfs_avg + sdo_avg + dominance_avg + prestige_avg + (1 | ResponseId), data = df_long_controls, REML = TRUE)
summary(m2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: affordance_score ~ status_expansion_c + szsb_avg + nfs_avg +  
##     sdo_avg + dominance_avg + prestige_avg + (1 | ResponseId)
##    Data: df_long_controls
## 
## REML criterion at convergence: 381.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.1923 -0.3988  0.0219  0.5724  2.2541 
## 
## Random effects:
##  Groups     Name        Variance Std.Dev.
##  ResponseId (Intercept) 0.1918   0.4380  
##  Residual               0.7134   0.8446  
## Number of obs: 138, groups:  ResponseId, 46
## 
## Fixed effects:
##                    Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)         7.55917    1.09235 39.00000   6.920 2.76e-08 ***
## status_expansion_c  0.66312    0.18082 39.00000   3.667  0.00073 ***
## szsb_avg           -0.35867    0.15103 39.00000  -2.375  0.02257 *  
## nfs_avg             0.15562    0.18846 39.00000   0.826  0.41396    
## sdo_avg            -0.01225    0.09631 39.00000  -0.127  0.89942    
## dominance_avg      -0.02261    0.11162 39.00000  -0.203  0.84055    
## prestige_avg       -0.37530    0.23352 39.00000  -1.607  0.11609    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) stts__ szsb_v nfs_vg sdo_vg dmnnc_
## stts_xpnsn_  0.000                                   
## szsb_avg    -0.613  0.127                            
## nfs_avg      0.010 -0.238 -0.098                     
## sdo_avg     -0.364  0.363  0.125 -0.280              
## dominanc_vg  0.154  0.024 -0.271 -0.215 -0.392       
## prestige_vg -0.682  0.054  0.330 -0.660  0.427 -0.098

The core finding (status expansion beliefs → more status affordance) is robust: the effect remains significant and meaningful even after adjusting for related constructs (SZSB, NFS, SDO, dominance, prestige). SZSB emerges as an additional significant predictor, but in the opposite direction. Other status-related constructs don’t explain unique variance in affordance once expansion beliefs are in the model.
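
To put an interval around the adjusted effect, Wald confidence intervals for the fixed effects are one quick option (a sketch; Wald intervals are only approximate for mixed models):

# Approximate 95% CIs for the fixed effects in the controls model
confint(m2, parm = "beta_", method = "Wald")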

Do high SEB people rate everyone high?

library(lmtest)
## Loading required package: zoo
## 
## Attaching package: 'zoo'
## The following objects are masked from 'package:base':
## 
##     as.Date, as.Date.numeric
library(sandwich)

# Within-rater dispersion (SD across the 3 rated targets)
rater_dispersion <- df_full %>%
  transmute(
    ResponseId,
    SEB = status_expansion_belief,
    mean_aff = rowMeans(cbind(selected1_status_avg, selected2_status_avg, selected3_status_avg), na.rm = TRUE),
    sd_aff   = apply(cbind(selected1_status_avg, selected2_status_avg, selected3_status_avg), 1, sd, na.rm = TRUE),
    iqr_aff  = apply(cbind(selected1_status_avg, selected2_status_avg, selected3_status_avg), 1, IQR, na.rm = TRUE)
  )

# Does SEB predict the *mean* AND/OR the *spread* across targets?
m_mean <- lm(mean_aff ~ scale(SEB, center=TRUE, scale=FALSE), data = rater_dispersion)
m_sd   <- lm(sd_aff   ~ scale(SEB, center=TRUE, scale=FALSE), data = rater_dispersion)
m_iqr  <- lm(iqr_aff  ~ scale(SEB, center=TRUE, scale=FALSE), data = rater_dispersion)

coeftest(m_mean, vcov = vcovHC(m_mean, type = "HC3"))
## 
## t test of coefficients:
## 
##                                          Estimate Std. Error t value  Pr(>|t|)
## (Intercept)                               5.31884    0.10200 52.1476 < 2.2e-16
## scale(SEB, center = TRUE, scale = FALSE)  0.69916    0.18718  3.7353 0.0005363
##                                             
## (Intercept)                              ***
## scale(SEB, center = TRUE, scale = FALSE) ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
coeftest(m_sd,   vcov = vcovHC(m_sd, type = "HC3"))
## 
## t test of coefficients:
## 
##                                           Estimate Std. Error t value  Pr(>|t|)
## (Intercept)                               0.652812   0.081797  7.9809 4.297e-10
## scale(SEB, center = TRUE, scale = FALSE) -0.085493   0.117319 -0.7287      0.47
##                                             
## (Intercept)                              ***
## scale(SEB, center = TRUE, scale = FALSE)    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
coeftest(m_iqr,  vcov = vcovHC(m_iqr, type = "HC3"))
## 
## t test of coefficients:
## 
##                                          Estimate Std. Error t value  Pr(>|t|)
## (Intercept)                               0.62319    0.07911  7.8775 6.053e-10
## scale(SEB, center = TRUE, scale = FALSE) -0.08488    0.11430 -0.7426    0.4617
##                                             
## (Intercept)                              ***
## scale(SEB, center = TRUE, scale = FALSE)    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
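
A quick plot of the same question, level versus spread (a sketch):

# SEB predicts the average rating a manager gives, but not how much they differentiate
ggplot(rater_dispersion, aes(x = SEB, y = sd_aff)) +
  geom_point(alpha = 0.6) +
  geom_smooth(method = "lm", se = TRUE) +
  labs(x = "Status Expansion Belief (SEB)", y = "Within-manager SD of affordance")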

What about across items?

# Long per-item data: 3 targets × 3 items = 9 rows per rater
df_items <- df_full %>%
  select(ResponseId, status_expansion_belief,
         affordance1_1:affordance1_3,
         affordance2_1:affordance2_3,
         affordance3_1:affordance3_3) %>%
  pivot_longer(cols = starts_with("affordance"),
               names_to = "which",
               values_to = "score") %>%
  mutate(
    target = str_extract(which, "(?<=affordance)\\d+(?=_)"),
    item   = str_extract(which, "(?<=_)\\d+"),
    SEB_c  = scale(status_expansion_belief, center=TRUE, scale=FALSE)[,1]
  )

# Does SEB predict each item? (random intercept for rater)
m_item <- lmer(score ~ SEB_c + item + (1|ResponseId), data = df_items, REML = TRUE)
summary(m_item)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: score ~ SEB_c + item + (1 | ResponseId)
##    Data: df_items
## 
## REML criterion at convergence: 1226.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3523 -0.6240  0.1740  0.6404  2.3326 
## 
## Random effects:
##  Groups     Name        Variance Std.Dev.
##  ResponseId (Intercept) 0.3466   0.5887  
##  Residual               0.9513   0.9754  
## Number of obs: 414, groups:  ResponseId, 46
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)   5.6449     0.1201  92.3219  46.996  < 2e-16 ***
## SEB_c         0.6992     0.1591  44.0000   4.394 6.92e-05 ***
## item2        -0.5000     0.1174 366.0000  -4.258 2.62e-05 ***
## item3        -0.4783     0.1174 366.0000  -4.073 5.69e-05 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##       (Intr) SEB_c  item2 
## SEB_c  0.000              
## item2 -0.489  0.000       
## item3 -0.489  0.000  0.500
# Do items differ in slope? (test interaction)
m_item_int <- lmer(score ~ SEB_c*item + (1|ResponseId), data = df_items, REML = TRUE)
anova(m_item, m_item_int)  # if non-sig, slopes are similar across items
## refitting model(s) with ML (instead of REML)
## Data: df_items
## Models:
## m_item: score ~ SEB_c + item + (1 | ResponseId)
## m_item_int: score ~ SEB_c * item + (1 | ResponseId)
##            npar  AIC    BIC  logLik deviance  Chisq Df Pr(>Chisq)
## m_item        6 1229 1253.2 -608.52     1217                     
## m_item_int    8 1232 1264.2 -607.98     1216 1.0774  2     0.5835

How many nominations on average?

report_cols <- paste0("report", 1:10)

df_reports <- data %>%
  # include the report columns so we can clean them
  select(ResponseId, all_of(report_cols)) %>%
  # turn "", "na", "n/a" (with any whitespace) into NA
  mutate(across(all_of(report_cols),
                ~ na_if(str_squish(as.character(.)), ""))) %>%
  mutate(across(all_of(report_cols),
                ~ ifelse(tolower(.) %in% c("na","n/a"), NA, .))) %>%
  # count non-missing reports per manager
  mutate(n_nominations = rowSums(across(all_of(report_cols), ~ !is.na(.))))

mean(df_reports$n_nominations)
## [1] 6.42
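
The full distribution of nomination counts may be more informative than the mean alone (a quick sketch; output not shown):

# How many coworkers each manager listed, 0-10
table(df_reports$n_nominations)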

Is the distribution of SEBs skewed or normal?

# Histogram with density curve
ggplot(df_full, aes(x = status_expansion_belief)) +
  geom_histogram(aes(y = after_stat(density)), bins = 20, fill = "grey70", color = "black") +
  geom_density(color = "blue", linewidth = 1) +
  labs(x = "Status Expansion Belief (SEB)", y = "Density",
       title = "Distribution of SEB")
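
To complement the visual check, a sketch of the numeric skew and a Shapiro-Wilk test (with n = 46 the test is a rough guide, not a verdict):

# Skew, kurtosis, and a formal normality test for SEB
psych::describe(df_full$status_expansion_belief)
shapiro.test(df_full$status_expansion_belief)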