library(readxl)
## Warning: package 'readxl' was built under R version 4.2.3
Final_analysis_reboot_raw <- read_excel("C:/Users/Eric/Documents/R/Final analysis reboot raw.xlsx")
data <- Final_analysis_reboot_raw

Project Background

Most individuals struggle to maintain regular physical activity, which can lead to negative health outcomes such as increased mortality risk and psychological distress. Self-regulation has been highlighted as a key factor contributing to regular physical activity. Previous research has established a negative indirect association between stress and regular physical activity (i.e., minutes and rate of perceived exertion) through self-control. The present study attempts to extend these results by testing whether the negative indirect effect of stress is conditional on self-motivation and self-efficacy. Demographic information and data on trait self-motivation and trait self-efficacy were collected at intake. Data on participants' stress, state self-regulation (i.e., state self-control, state self-motivation, and state self-efficacy), minutes of physical activity, and rate of perceived exertion were collected over a 2-day period using items sent via email throughout the day (a form of ecological momentary assessment).

Data Processing

Missing Data Analysis

Here I produce a heatmap of the missing data and create data frames containing the percent and number of missing values for each case and each variable.

library(naniar)

vis_miss(data)#provides a heatmap of missing data 
## Warning: `gather_()` was deprecated in tidyr 1.2.0.
## ℹ Please use `gather()` instead.
## ℹ The deprecated feature was likely used in the visdat package.
##   Please report the issue at <https://github.com/ropensci/visdat/issues>.
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_lifecycle_warnings()` to see where this warning was
## generated.

case_missing <- miss_case_summary(data)#returns the percent and number of missing for each case 
var_missing <- miss_var_summary(data) #returns the percent and number of missing values for each variable column 
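
These summaries can also be used to flag problematic rows or columns directly; a minimal sketch, assuming the pct_miss column returned by the naniar summary functions and a 50% cut-off chosen purely for illustration:

# cases and variables with more than 50% of values missing (threshold is illustrative)
high_miss_cases <- dplyr::filter(case_missing, pct_miss > 50)
high_miss_vars <- dplyr::filter(var_missing, pct_miss > 50)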

Reverse Scoring Items

Here I reverse score the necessary items before imputation.

library(tidyverse)
## Warning: package 'tidyverse' was built under R version 4.2.3
## Warning: package 'ggplot2' was built under R version 4.2.3
## Warning: package 'tidyr' was built under R version 4.2.3
## Warning: package 'readr' was built under R version 4.2.3
## Warning: package 'purrr' was built under R version 4.2.3
## Warning: package 'stringr' was built under R version 4.2.3
## Warning: package 'forcats' was built under R version 4.2.3
## Warning: package 'lubridate' was built under R version 4.2.3
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr     1.0.10     ✔ readr     2.1.4 
## ✔ forcats   1.0.0      ✔ stringr   1.5.1 
## ✔ ggplot2   3.4.4      ✔ tibble    3.1.8 
## ✔ lubridate 1.9.3      ✔ tidyr     1.3.0 
## ✔ purrr     1.0.2      
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag()    masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
# reverse scoring the SC (SSC), T_SE (SEPA), and STRESS (PSS) items
rev_items <- c("SEPA12", "SEPA13", "SEPA14",
               "SSC1_1", "SSC3_1", "SSC4_1", "SSC1_2", "SSC3_2", "SSC4_2",
               "PSS4_1", "PSS5_1", "PSS8_1", "PSS4_2", "PSS5_2", "PSS8_2")
data <- data %>% 
  dplyr::mutate(dplyr::across(dplyr::all_of(rev_items),
                              ~ dplyr::recode(.x, `7` = 1, `6` = 2, `5` = 3, `4` = 4, `3` = 5, `2` = 6, `1` = 7)))
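
As a quick spot-check, reverse scoring a 1-7 item is equivalent to subtracting the response from 8. A minimal sketch, assuming responses only take the values 1-7 (or NA), comparing one recoded item against the untouched raw data frame:

# the recoded item should equal 8 minus the original item (NAs stay NA)
all.equal(data$SEPA12, 8 - Final_analysis_reboot_raw$SEPA12)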

Imputation

First I create a subset of items for each measure.

library(tidyverse)
t.sm <-data %>%
  dplyr::select(RM4FM1,RM4FM2,RM4FM3,RM4FM4,RM4FM5,RM4FM6,RM4FM7,RM4FM8,RM4FM9,
         RM4FM10,RM4FM11,RM4FM12) #subsetting RM4FM only 
t.se <- data %>%
  dplyr::select(SEPA1,SEPA2,SEPA3,SEPA4,SEPA5,SEPA6,SEPA7,SEPA8,SEPA9,SEPA10,SEPA11,SEPA12,SEPA13,SEPA14,SEPA15,SEPA16,SEPA17)

stressd1 <- data %>%
  dplyr::select(PSS1_1,PSS2_1,PSS3_1,PSS4_1,PSS5_1,PSS6_1,PSS7_1,PSS8_1,PSS9_1,PSS10_1)
stressd2 <- data %>%
  dplyr::select(PSS1_2,PSS2_2,PSS3_2,PSS4_2,PSS5_2,PSS6_2,PSS7_2,PSS8_2,PSS9_2,PSS10_2) 


s.msrd1<- data %>%
  dplyr::select(SSC1_1, SSC2_1 , SSC3_1 , SSC4_1 , SSC5_1, SE_DAILY_1, MOT_INT_1)
s.msrd2<- data%>%
  dplyr::select(SSC1_2, SSC2_2 , SSC3_2 , SSC4_2 , SSC5_2, SE_DAILY_2, MOT_INT_2)

Here I impute data for each questionnaire.

library(mice)
## Warning: package 'mice' was built under R version 4.2.3
## 
## Attaching package: 'mice'
## The following object is masked from 'package:stats':
## 
##     filter
## The following objects are masked from 'package:base':
## 
##     cbind, rbind
t.sm.impute <- t.sm %>%
  mice()
## 
##  iter imp variable
##   1   1  RM4FM1  RM4FM2  RM4FM3  RM4FM4  RM4FM5  RM4FM6  RM4FM7  RM4FM8  RM4FM9  RM4FM10  RM4FM11  RM4FM12
##   ...
##   5   5  RM4FM1  RM4FM2  RM4FM3  RM4FM4  RM4FM5  RM4FM6  RM4FM7  RM4FM8  RM4FM9  RM4FM10  RM4FM11  RM4FM12
t.sm.impute <- complete(t.sm.impute)

t.se.impute <- t.se %>%
  mice()
## 
##  iter imp variable
##   1   1  SEPA1  SEPA2  SEPA3  SEPA4  SEPA5  SEPA6  SEPA7  SEPA8  SEPA9  SEPA10  SEPA11  SEPA12  SEPA13  SEPA14  SEPA15  SEPA16  SEPA17
##   ...
##   5   5  SEPA1  SEPA2  SEPA3  SEPA4  SEPA5  SEPA6  SEPA7  SEPA8  SEPA9  SEPA10  SEPA11  SEPA12  SEPA13  SEPA14  SEPA15  SEPA16  SEPA17
t.se.impute <- complete(t.se.impute)

stressd1.impute <- stressd1%>%
  mice()
## 
##  iter imp variable
##   1   1  PSS1_1  PSS2_1  PSS3_1  PSS4_1  PSS5_1  PSS6_1  PSS7_1  PSS8_1  PSS9_1  PSS10_1
##   ...
##   5   5  PSS1_1  PSS2_1  PSS3_1  PSS4_1  PSS5_1  PSS6_1  PSS7_1  PSS8_1  PSS9_1  PSS10_1
stressd1.impute <- complete(stressd1.impute)

stressd2.impute <- stressd2%>%
  mice()
## 
##  iter imp variable
##   1   1  PSS1_2  PSS2_2  PSS3_2  PSS4_2  PSS5_2  PSS6_2  PSS7_2  PSS8_2  PSS9_2  PSS10_2
##   ...
##   5   5  PSS1_2  PSS2_2  PSS3_2  PSS4_2  PSS5_2  PSS6_2  PSS7_2  PSS8_2  PSS9_2  PSS10_2
stressd2.impute <- complete(stressd2.impute)

s.msrd1.impute <- s.msrd1%>%
  mice()
## 
##  iter imp variable
##   1   1  SSC1_1  SSC2_1  SSC3_1  SSC4_1  SSC5_1  SE_DAILY_1  MOT_INT_1
##   ...
##   5   5  SSC1_1  SSC2_1  SSC3_1  SSC4_1  SSC5_1  SE_DAILY_1  MOT_INT_1
s.msrd1.impute <- complete(s.msrd1.impute)

s.msrd2.impute <- s.msrd2%>%
  mice()
## 
##  iter imp variable
##   1   1  SSC1_2  SSC2_2  SSC3_2  SSC4_2  SSC5_2  SE_DAILY_2  MOT_INT_2
##   ...
##   5   5  SSC1_2  SSC2_2  SSC3_2  SSC4_2  SSC5_2  SE_DAILY_2  MOT_INT_2
s.msrd2.impute <- complete(s.msrd2.impute)
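
The mice() calls above use the package defaults (m = 5 imputed data sets and predictive mean matching for numeric variables) and print the full iteration log. A reproducibility-minded sketch for the trait self-motivation items, with an arbitrary fixed seed and the log suppressed:

# same imputation with a fixed seed and without the iteration log
t.sm.impute <- complete(mice(t.sm, m = 5, seed = 101, printFlag = FALSE))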

Here I create totals for each measure and confirm the imputation by producing before and after missingness heatmaps.

library(tidyverse)
library(naniar)

###trait self motivation total  
t.sm <- t.sm  %>% 
  mutate(INT= (RM4FM1+RM4FM4+RM4FM11)/3,
         IDEN = (RM4FM2+RM4FM8+RM4FM10)/3,
         INTRO = (RM4FM3+RM4FM6+RM4FM9)/3,
         EXT = (RM4FM5+RM4FM7+RM4FM12)/3)
t.sm <-t.sm  %>% 
  mutate(T_SM= (EXT*-2)+ (INTRO*-1)+ (IDEN*1)+(INT*2))

t.sm.impute <- t.sm.impute  %>% 
  mutate(INT= (RM4FM1+RM4FM4+RM4FM11)/3,
         IDEN = (RM4FM2+RM4FM8+RM4FM10)/3,
         INTRO = (RM4FM3+RM4FM6+RM4FM9)/3,
         EXT = (RM4FM5+RM4FM7+RM4FM12)/3)
t.sm.impute <-t.sm.impute  %>% 
  mutate(T_SM= (EXT*-2)+ (INTRO*-1)+ (IDEN*1)+(INT*2))

vis_miss(t.sm)#provides a heatmap of missing data 

vis_miss(t.sm.impute)#provides a heatmap of missing data 

###trait self efficacy total
t.se <-t.se  %>%
  dplyr::mutate(T_SE= (`SEPA1`+`SEPA2`+`SEPA3`+`SEPA4`+`SEPA5`+`SEPA6`+`SEPA7`+`SEPA8`+
                  `SEPA9`+`SEPA10`+`SEPA11`+`SEPA12`+`SEPA13`+`SEPA14`+`SEPA15`+
                  `SEPA16`+`SEPA17`)/17)

t.se.impute <-t.se.impute  %>%
  dplyr::mutate(T_SE= (`SEPA1`+`SEPA2`+`SEPA3`+`SEPA4`+`SEPA5`+`SEPA6`+`SEPA7`+`SEPA8`+
                  `SEPA9`+`SEPA10`+`SEPA11`+`SEPA12`+`SEPA13`+`SEPA14`+`SEPA15`+
                  `SEPA16`+`SEPA17`)/17)
vis_miss(t.se)#provides a heatmap of missing data 

vis_miss(t.se.impute)#provides a heatmap of missing data 

### Perceived stress total  
stress = cbind(stressd1, stressd2)
stress <- stress  %>%
  mutate(STRESS = (PSS1_1+PSS2_1+PSS3_1+PSS4_1+PSS5_1+PSS6_1+PSS7_1+PSS8_1+PSS9_1+PSS10_1 
                   + PSS1_2+PSS2_2+PSS3_2+PSS4_2+PSS5_2+PSS6_2+PSS7_2+PSS8_2+PSS9_2+PSS10_2)/20 ) 

stress.impute = cbind(stressd1.impute, stressd2.impute)
stress.impute <- stress.impute  %>%
  mutate(STRESS = (PSS1_1+PSS2_1+PSS3_1+PSS4_1+PSS5_1+PSS6_1+PSS7_1+PSS8_1+PSS9_1+PSS10_1 
                   + PSS1_2+PSS2_2+PSS3_2+PSS4_2+PSS5_2+PSS6_2+PSS7_2+PSS8_2+PSS9_2+PSS10_2)/20 ) 

vis_miss(stress)#provides a heatmap of missing data 

vis_miss(stress.impute)#provides a heatmap of missing data 

## smsr total 
s.msr = cbind(s.msrd1, s.msrd2)
s.msr <- s.msr %>%
  mutate(S_MSR = (SSC1_1+SSC2_1+SSC3_1+SSC4_1+SSC5_1 +SSC1_2+SSC2_2+SSC3_2+SSC4_2+SSC5_2 + MOT_INT_1+MOT_INT_2+SE_DAILY_1+SE_DAILY_2)/14) 

s.msr.impute = cbind(s.msrd1.impute, s.msrd2.impute)
s.msr.impute <- s.msr.impute %>%
  mutate(S_MSR = (SSC1_1+SSC2_1+SSC3_1+SSC4_1+SSC5_1 +SSC1_2+SSC2_2+SSC3_2+SSC4_2+SSC5_2 + MOT_INT_1+MOT_INT_2+SE_DAILY_1+SE_DAILY_2)/14) 

vis_miss(s.msr)#provides a heatmap of missing data 

vis_miss(s.msr.impute)#provides a heatmap of missing data 

Conducting t-tests to ensure the imputed totals are similar to the observed totals. Stress, state self-regulation, and trait self-motivation were significantly different (this may vary based on the result of the imputation). A significant difference indicates the data were not missing at random (NMAR) rather than missing at random (MAR); that is, for stress, state self-regulation, and trait self-motivation the missingness depends on the missing values themselves. Perhaps individuals who were more stressed were less likely to respond because they were overwhelmed, and those with lower self-regulation or more external motivation were less likely to respond because they would theoretically be less able to regulate their behaviour as intended. Given the differences are not extreme, we can proceed with the imputed data: NMAR missingness needs to be handled to avoid biasing the model, and we would simply expect the imputed values to differ from the observed ones.

# t-test for trait self-motivation 
t.sm<- t.sm %>%
  mutate(missing = ifelse(is.na(T_SM), "yes", "no")) #creates grouping variable based on missing or not  
t.sm.impute <- t.sm.impute %>%
  mutate(missing = t.sm$missing) # moves the grouping variable 'missing' to the imputed data set  
t.test(T_SM ~ missing, data = t.sm.impute) 
## 
##  Welch Two Sample t-test
## 
## data:  T_SM by missing
## t = -2.2091, df = 47.053, p-value = 0.03207
## alternative hypothesis: true difference in means between group no and group yes is not equal to 0
## 95 percent confidence interval:
##  -2.6526622 -0.1240574
## sample estimates:
##  mean in group no mean in group yes 
##      -0.007407407       1.380952381
# t-test for trait self-efficacy 
t.se<- t.se %>%
  mutate(missing = ifelse(is.na(T_SE), "yes", "no")) #creates grouping variable based on missing or not  
t.se.impute <- t.se.impute %>%
  mutate(missing = t.se$missing) # moves the grouping variable 'missing' to the imputed data set  
t.test(T_SE ~ missing, data = t.se.impute) 
## 
##  Welch Two Sample t-test
## 
## data:  T_SE by missing
## t = 1.1522, df = 75.433, p-value = 0.2529
## alternative hypothesis: true difference in means between group no and group yes is not equal to 0
## 95 percent confidence interval:
##  -0.0966897  0.3620382
## sample estimates:
##  mean in group no mean in group yes 
##          4.342613          4.209939
# t-test for stress
stress <- stress %>%
    mutate(missing = ifelse(is.na(STRESS), "yes", "no")) #creates grouping variable based on missing or not  
stress.impute <- stress.impute %>%
  mutate(missing = stress$missing) # moves the grouping variable 'missing' to the imputed data set  
t.test(STRESS ~ missing, data = stress.impute) 
## 
##  Welch Two Sample t-test
## 
## data:  STRESS by missing
## t = 3.0458, df = 122.6, p-value = 0.002841
## alternative hypothesis: true difference in means between group no and group yes is not equal to 0
## 95 percent confidence interval:
##  0.1067560 0.5031198
## sample estimates:
##  mean in group no mean in group yes 
##          3.659286          3.354348
# t-test for state multifactor self-regulation 
s.msr<- s.msr %>%
  mutate(missing = ifelse(is.na(S_MSR), "yes", "no")) #creates grouping variable based on missing or not  
s.msr.impute <- s.msr.impute %>%
  mutate(missing = s.msr$missing) # moves the grouping variable 'missing' to the imputed data set  
t.test(S_MSR ~ missing, data = s.msr.impute) 
## 
##  Welch Two Sample t-test
## 
## data:  S_MSR by missing
## t = -2.292, df = 146.16, p-value = 0.02334
## alternative hypothesis: true difference in means between group no and group yes is not equal to 0
## 95 percent confidence interval:
##  -0.41127634 -0.03041279
## sample estimates:
##  mean in group no mean in group yes 
##          4.331536          4.552381
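
As a complementary check, Little's MCAR test (available in recent versions of naniar as mcar_test()) could be run on the raw item subsets; it tests the stricter MCAR assumption rather than separating MAR from NMAR, but a clear rejection points in the same direction. A minimal sketch for the trait self-motivation items:

# Little's MCAR test on the un-imputed trait self-motivation items
mcar_test(dplyr::select(t.sm, dplyr::starts_with("RM4FM")))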

Subsetting Model Data and Final Replacements

Here I create a new dataframe by subsetting model variables and imputed totals. I check it for missing data.

data = data %>%
  select(AGE,SEX,INCOME,ADULTS_NUM,MINORS_NUM, MIN_1, MIN_2, RPE_1, RPE_2)
t.sm.impute = t.sm.impute%>%
  select(T_SM)
t.se.impute = t.se.impute %>% 
  select(T_SE)
stress.impute = stress.impute %>%
  select(STRESS)
s.msr.impute = s.msr.impute %>%
  select(S_MSR)

model.data = cbind(data,s.msr.impute,stress.impute,t.se.impute,t.sm.impute)

vis_miss(model.data)
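
Before filling the remaining gaps, the amount of missingness left in the model data can also be quantified with the same naniar summary used earlier:

miss_var_summary(model.data) # remaining missingness per model variable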

Here I replace NA values with the mean (for continuous variables) or the mode (for categorical variables). I then calculate a 2-day average for minutes of physical activity and rate of perceived exertion. Lastly, I produce a missing value heatmap to confirm the NA replacement.

#mean replacement 
model.data$AGE[is.na(model.data$AGE)]<-mean(model.data$AGE,na.rm=TRUE)
model.data$INCOME[is.na(model.data$INCOME)]<-mean(model.data$INCOME,na.rm=TRUE)
model.data$MIN_2[is.na(model.data$MIN_2)]<-mean(model.data$MIN_2,na.rm=TRUE)
model.data$RPE_1[is.na(model.data$RPE_1)]<-mean(model.data$RPE_1,na.rm=TRUE)
model.data$RPE_2[is.na(model.data$RPE_2)]<-mean(model.data$RPE_2,na.rm=TRUE)


#mode replacement 
get_mode <- function(x) {
  counts <- table(x)                          # create a frequency table of observed values
  mode <- names(counts)[which.max(counts)]    # take the (first) value with the maximum frequency
  as.numeric(mode)                            # return as numeric so the replaced columns keep their type
}

model.data$MINORS_NUM[is.na(model.data$MINORS_NUM)]<-get_mode(model.data$MINORS_NUM)
model.data$ADULTS_NUM[is.na(model.data$ADULTS_NUM)]<-get_mode(model.data$ADULTS_NUM)
model.data$SEX[is.na(model.data$SEX)]<-get_mode(model.data$SEX)



# here I am calculating average minutes and average rate of perceived exertion 
model.data <- model.data %>%
  dplyr::mutate(AVG.MIN = (MIN_1+MIN_2)/2,
                AVG.RPE = (RPE_1+RPE_2)/2)

# removing MIN and RPE (day 1 and 2) from the model data 
model.data <- model.data %>%
  dplyr::select(-MIN_1,-MIN_2,-RPE_1,-RPE_2)


vis_miss(model.data)

EDA

Descriptive Statistics

First, I clean my R environment of all objects except my model data. I then produce a descriptive statistics table. To get counts for SEX, ADULTS_NUM, and MINORS_NUM, they need to be converted to factors.

rm(list=setdiff(ls(), "model.data")) # cleans r environment 
model.data <- model.data%>% # needs to be factor for descriptives 
  dplyr::mutate(SEX = as.factor(SEX),
         ADULTS_NUM = as.factor(ADULTS_NUM),
         MINORS_NUM = as.factor(MINORS_NUM))
library(skimr)
## Warning: package 'skimr' was built under R version 4.2.3
## 
## Attaching package: 'skimr'
## The following object is masked from 'package:naniar':
## 
##     n_complete
skim(model.data)
Data summary
Name model.data
Number of rows 582
Number of columns 11
_______________________
Column type frequency:
factor 3
numeric 8
________________________
Group variables None

Variable type: factor

skim_variable n_missing complete_rate ordered n_unique top_counts
SEX 0 1 FALSE 2 1: 340, 2: 242
ADULTS_NUM 0 1 FALSE 6 2: 345, 3: 100, 1: 99, 4: 30
MINORS_NUM 0 1 FALSE 6 1: 249, 2: 162, 0: 146, 3: 14

Variable type: numeric

skim_variable n_missing complete_rate mean sd p0 p25 p50 p75 p100 hist
AGE 0 1 33.83 9.02 16.00 27.00 33.00 38.75 6.40e+01 ▃▇▅▂▁
INCOME 0 1 62850.16 48979.63 0.00 37625.00 62850.16 71000.00 6.50e+05 ▇▁▁▁▁
S_MSR 0 1 4.37 0.86 2.14 3.79 4.14 4.86 6.93e+00 ▁▇▇▃▂
STRESS 0 1 3.61 0.85 1.10 3.00 3.90 4.20 6.60e+00 ▁▂▇▁▁
T_SE 0 1 4.33 0.94 2.06 3.65 4.41 5.00 6.35e+00 ▂▅▇▇▂
T_SM 0 1 0.09 3.80 -11.33 -1.67 0.00 2.00 1.30e+01 ▁▃▇▂▁
AVG.MIN 0 1 65.79 32.96 0.00 42.50 60.00 85.50 1.80e+02 ▃▇▃▂▁
AVG.RPE 0 1 4.51 1.74 1.00 3.00 4.50 5.50 1.00e+01 ▂▇▅▂▁
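
As a quick cross-check of the factor counts in the skim output, dplyr::count() tabulates these variables directly:

# frequency counts for the categorical variables
model.data %>% dplyr::count(SEX)
model.data %>% dplyr::count(ADULTS_NUM)
model.data %>% dplyr::count(MINORS_NUM)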

Correlation Plots

Here I explore the bivariate relationships using correlation plots. Only two plots are shown as examples. Within these plots we can see a strong negative relationship between stress and state multifactor self-regulation, indicating those with higher stress likely had lower state self-regulation. We can also see a very small positive association between trait self-efficacy and average rate of perceived exertion, indicating those with higher trait self-efficacy likely reported a slightly higher average rate of perceived exertion.

library(ggplot2)
library(ggExtra)

correlation <- cor.test(model.data$STRESS, model.data$S_MSR)
# Extract the correlation coefficient and p-value
cor_coef <- correlation$estimate
p_value <- correlation$p.value
# Create the combined text object
combined_text <- paste("r=", round(cor_coef, 2),
                       "p=", format.pval(p_value, digits = 3))
# create plot with combined text as title 
plot<- ggplot(model.data, aes(x = STRESS, y = S_MSR)) +
  geom_point() +
  geom_smooth(method = "lm", se = T) +
  labs(x = "STRESS", y = "S_MSR", title = combined_text)
#adding marginal histograms 
plot<-ggMarginal(plot, type="histogram")
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
plot

library(ggplot2)
library(ggExtra)

correlation <- cor.test(model.data$T_SE, model.data$AVG.RPE)
# Extract the correlation coefficient and p-value
cor_coef <- correlation$estimate
p_value <- correlation$p.value
# Create the combined text object
combined_text <- paste("r=", round(cor_coef, 2),
                       "p=", format.pval(p_value, digits = 3))
# create plot with combined text as title 
plot<- ggplot(model.data, aes(x = T_SE, y = AVG.RPE)) +
  geom_point() +
  geom_smooth(method = "lm", se = T) +
  labs(x = "T_SE", y = "AG.RPE", title = combined_text)
#adding marginal histograms 
plot<-ggMarginal(plot, type="histogram")
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
## `geom_smooth()` using formula = 'y ~ x'
plot
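
For an overview of all the bivariate associations at once, a correlation matrix plot could supplement these pairwise plots. A minimal sketch, assuming the corrplot package is installed (corrplot and the num_vars object are not part of the original workflow); the factor columns are dropped because cor() needs numeric input:

library(corrplot)
# correlation matrix of the numeric model variables
num_vars <- dplyr::select(model.data, where(is.numeric))
corrplot(cor(num_vars), method = "color", type = "upper",
         addCoef.col = "black", tl.col = "black")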

Model Training and Evaluation

First I create the train-test datasets and interaction terms within them.

model.data <- model.data%>% # needs to be numeric for modeling
  dplyr::mutate(SEX = as.numeric(SEX),
         ADULTS_NUM = as.numeric(ADULTS_NUM),
         MINORS_NUM = as.numeric(MINORS_NUM))
library(caTools)
## Warning: package 'caTools' was built under R version 4.2.3
# splitting data 
set.seed(101)
split.data = sample.split(model.data$AVG.RPE, SplitRatio= 0.75)
train = subset(model.data, split.data == TRUE)
test = subset(model.data, split.data == FALSE)
nrow(train)
## [1] 438
nrow(test)
## [1] 144
# computing the XW, XZ, MW, and MZ interaction terms in both data sets  
train = train %>%
  dplyr::mutate(XW = STRESS*T_SM,
                XZ = STRESS*T_SE, 
                MW = S_MSR*T_SM,
                MZ = S_MSR*T_SE)

test = test %>%
  dplyr::mutate(XW = STRESS*T_SM,
                XZ = STRESS*T_SE, 
                MW = S_MSR*T_SM,
                MZ = S_MSR*T_SE)
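
A common option when building these product terms is to mean-center the focal predictors first, so the lower-order coefficients are interpretable at average levels of the moderators. The models below keep the uncentered specification used here; a sketch of the centered alternative (train_c is a hypothetical copy, not used later):

# mean-center the focal predictors, then rebuild the products from the centered columns
train_c <- train %>%
  dplyr::mutate(dplyr::across(c(STRESS, S_MSR, T_SM, T_SE), ~ .x - mean(.x)),
                XW = STRESS*T_SM,
                XZ = STRESS*T_SE,
                MW = S_MSR*T_SM,
                MZ = S_MSR*T_SE)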

Here I fit a model with min (average minutes of physical activity) as the outcome. Only trait self-motivation approaches a significant contribution to minutes of daily physical activity (p = .06). Furthermore, the model has poor accuracy, indicated by the small R2, the mismatch between observed and predicted values, and non-normal residuals. This means there will be a large degree of error in predictions made from the model, and the error appears to be greater when estimating higher levels of physical activity.

# fitting the y-min model on the training data 
y_min_mod = lm(AVG.MIN ~ STRESS + S_MSR + T_SM + MW + T_SE + MZ + ADULTS_NUM + MINORS_NUM + INCOME + SEX + AGE, data = train)
summary(y_min_mod)
## 
## Call:
## lm(formula = AVG.MIN ~ STRESS + S_MSR + T_SM + MW + T_SE + MZ + 
##     ADULTS_NUM + MINORS_NUM + INCOME + SEX + AGE, data = train)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -51.690 -22.068  -6.298  14.866 112.954 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)  
## (Intercept) -6.546e+01  4.834e+01  -1.354   0.1764  
## STRESS       5.838e+00  3.365e+00   1.735   0.0835 .
## S_MSR        1.588e+01  1.014e+01   1.566   0.1180  
## T_SM         4.288e+00  2.253e+00   1.903   0.0577 .
## MW          -7.852e-01  4.987e-01  -1.575   0.1161  
## T_SE         1.092e+01  9.576e+00   1.140   0.2549  
## MZ          -1.223e+00  2.133e+00  -0.573   0.5666  
## ADULTS_NUM   3.675e+00  2.106e+00   1.744   0.0818 .
## MINORS_NUM  -1.131e+00  1.814e+00  -0.623   0.5335  
## INCOME      -5.261e-05  3.347e-05  -1.572   0.1168  
## SEX          1.047e+00  3.067e+00   0.341   0.7329  
## AGE          3.085e-01  1.776e-01   1.737   0.0831 .
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 31.26 on 426 degrees of freedom
## Multiple R-squared:  0.1254, Adjusted R-squared:  0.1029 
## F-statistic: 5.555 on 11 and 426 DF,  p-value: 2.477e-08
# predicting min from the trained model 
X_test_y = test %>%
  dplyr::select(-AVG.RPE, - AVG.MIN) # removing outcomes from test data 
y_min_predictions <- predict(y_min_mod, newdata = X_test_y)


#model evaluation plotting predicted vs observed 
plot.data.min <- data.frame(test_min = test$AVG.MIN, min_predictions = y_min_predictions)
# Create the density plot with overlapping distributions
library(ggplot2)
ggplot(plot.data.min, aes(x = test_min, fill = "test_min")) +
  geom_density(alpha = 0.5) +
  geom_density(aes(x = min_predictions, fill = "min_predictions"), alpha = 0.5) +
  labs(x = "Value", y = "Density") +
  scale_fill_manual(values = c("test_min" = "blue", "min_predictions" = "red")) +
  theme_minimal()

# model evaluation plotting model residuals 
residuals_min <- residuals(y_min_mod)
library(car)
## Loading required package: carData
## 
## Attaching package: 'car'
## The following object is masked from 'package:dplyr':
## 
##     recode
## The following object is masked from 'package:purrr':
## 
##     some
qqPlot(residuals_min)

##  45 247 
##  39 201
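
To quantify the minutes model's test-set accuracy beyond the R2 and the plots above, the test-set error can be summarized directly. A minimal sketch (rmse_min and r2_min are illustrative names; the same two lines apply to the other models with their respective outcomes and predictions):

# test-set RMSE and R-squared for the minutes model
rmse_min <- sqrt(mean((test$AVG.MIN - y_min_predictions)^2))
r2_min <- 1 - sum((test$AVG.MIN - y_min_predictions)^2) / sum((test$AVG.MIN - mean(test$AVG.MIN))^2)
c(RMSE = rmse_min, test_R2 = r2_min)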

Here I fit a model with rpe as the outcome. We can see that stress, state multifactor self-regulation, trait self-efficacy, the trait self-efficacy × state multifactor self-regulation interaction term (MZ), and sex significantly predict average daily rate of perceived exertion. The model still has low accuracy, with a small R2 and a large difference between the observed values and those predicted by the trained model; however, the residuals are normally distributed. Thus, predictions will carry greater uncertainty, but that uncertainty is spread evenly across the model.

# fitting the y-rpe model on the training data 
y_rpe_mod = lm(AVG.RPE ~ STRESS + S_MSR + T_SM + MW + T_SE + MZ + ADULTS_NUM + MINORS_NUM + INCOME + SEX + AGE, data = train)
summary(y_rpe_mod)
## 
## Call:
## lm(formula = AVG.RPE ~ STRESS + S_MSR + T_SM + MW + T_SE + MZ + 
##     ADULTS_NUM + MINORS_NUM + INCOME + SEX + AGE, data = train)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -4.3157 -1.1858 -0.0942  1.0468  5.0104 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -9.009e+00  2.556e+00  -3.525 0.000469 ***
## STRESS       5.374e-01  1.779e-01   3.021 0.002670 ** 
## S_MSR        2.111e+00  5.361e-01   3.937 9.65e-05 ***
## T_SM        -2.660e-02  1.191e-01  -0.223 0.823353    
## MW           2.113e-02  2.636e-02   0.801 0.423392    
## T_SE         1.815e+00  5.062e-01   3.586 0.000375 ***
## MZ          -3.730e-01  1.127e-01  -3.309 0.001017 ** 
## ADULTS_NUM   1.955e-01  1.114e-01   1.756 0.079864 .  
## MINORS_NUM   9.434e-02  9.592e-02   0.984 0.325895    
## INCOME      -1.444e-06  1.770e-06  -0.816 0.414840    
## SEX          3.994e-01  1.621e-01   2.463 0.014169 *  
## AGE          1.134e-02  9.390e-03   1.208 0.227889    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.653 on 426 degrees of freedom
## Multiple R-squared:  0.1292, Adjusted R-squared:  0.1067 
## F-statistic: 5.746 on 11 and 426 DF,  p-value: 1.126e-08
# predicting rpe from the trained model 
y_rpe_predictions <- predict(y_rpe_mod, newdata = X_test_y)


#model evaluation plotting predicted vs observed 
plot.data.rpe <- data.frame(test_rpe = test$AVG.RPE, rpe_predictions = y_rpe_predictions)
# Create the density plot with overlapping distributions
library(ggplot2)
ggplot(plot.data.rpe, aes(x = test_rpe, fill = "test_rpe")) +
  geom_density(alpha = 0.5) +
  geom_density(aes(x = rpe_predictions, fill = "rpe_predictions"), alpha = 0.5) +
  labs(x = "Value", y = "Density") +
  scale_fill_manual(values = c("test_rpe" = "blue", "rpe_predictions" = "red")) +
  theme_minimal()

# model evaluation plotting model residuals 
residuals_rpe <- residuals(y_rpe_mod)
library(car)
qqPlot(residuals_rpe)

##  94 409 
##  80 324

Here I fit a model with state multifactor self-regulation (s.msr) as the outcome. We can see that stress, trait self-efficacy, and the trait self-efficacy × stress interaction term (XZ) significantly predict state multifactor self-regulation, with age approaching significance. The model appears to have good accuracy, with a large R2 and a high degree of overlap between the observed and predicted state multifactor self-regulation values; however, the residuals show some skew, suggesting the error is not evenly distributed throughout the model.

# fitting m model 
m_mod = lm(S_MSR ~ STRESS + T_SM + XW + T_SE + XZ + ADULTS_NUM + MINORS_NUM + INCOME + SEX + AGE, data = train)
summary(m_mod)
## 
## Call:
## lm(formula = S_MSR ~ STRESS + T_SM + XW + T_SE + XZ + ADULTS_NUM + 
##     MINORS_NUM + INCOME + SEX + AGE, data = train)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.29810 -0.26256 -0.03499  0.23682  1.62961 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  4.826e+00  5.307e-01   9.093  < 2e-16 ***
## STRESS      -3.087e-01  1.422e-01  -2.171 0.030455 *  
## T_SM        -1.421e-02  2.598e-02  -0.547 0.584585    
## XW           9.143e-03  7.209e-03   1.268 0.205420    
## T_SE         4.793e-01  1.098e-01   4.365  1.6e-05 ***
## XZ          -1.031e-01  3.045e-02  -3.386 0.000775 ***
## ADULTS_NUM   2.007e-02  3.001e-02   0.669 0.503899    
## MINORS_NUM  -3.346e-02  2.608e-02  -1.283 0.200269    
## INCOME      -5.099e-07  4.785e-07  -1.066 0.287153    
## SEX          2.168e-02  4.400e-02   0.493 0.622434    
## AGE          4.861e-03  2.526e-03   1.925 0.054929 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.4474 on 427 degrees of freedom
## Multiple R-squared:  0.7395, Adjusted R-squared:  0.7334 
## F-statistic: 121.2 on 10 and 427 DF,  p-value: < 2.2e-16
# predicting s.msr from trained model 
X_test_m = test %>%
  dplyr::select(-AVG.MIN, -AVG.RPE, -S_MSR) # removing outcomes from dataset 
m_predictions <- predict(m_mod, newdata = X_test_m)


#model evaluation plotting predicted vs observed 
plot.data <- data.frame(s_msr_test = test$S_MSR, predictions = m_predictions)
# Create the density plot with overlapping distributions
library(ggplot2)
ggplot(plot.data, aes(x = s_msr_test, fill = "s_msr_test")) +
  geom_density(alpha = 0.5) +
  geom_density(aes(x = m_predictions, fill = "m_predictions"), alpha = 0.5) +
  labs(x = "Value", y = "Density") +
  scale_fill_manual(values = c("s_msr_test" = "blue", "m_predictions" = "red")) +
  theme_minimal()

# model evaluation plotting model residuals 
residuals <- residuals(m_mod)
library(car)
qqPlot(residuals)

## 129  25 
## 109  22

Moderated Mediation

Here I fit a moderated mediation model combining the rpe (y) and s.msr (m) models to produce estimates of the conditional indirect effect of stress on average daily rate of perceived exertion, to probe the interaction effects, and to predict values of average rate of perceived exertion and state multifactor self-regulation for visualizing each interaction.
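
PROCESS bootstraps the conditional indirect effects, and the output below reports the random seed it drew (921091). For an exactly reproducible run, the seed (and, if desired, the number of bootstrap samples) could be passed explicitly; a sketch, assuming the seed and boot arguments of Hayes's PROCESS for R:

# reproducible version of the call below, with the seed taken from the reported "Random seed"
process(data=train,y="AVG.RPE",x="STRESS",m="S_MSR",w="T_SM",z="T_SE",
        cov=c("ADULTS_NUM","MINORS_NUM", "INCOME", "SEX", "AGE"),
        model=75, plot=1, seed=921091, boot=5000)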

# fitting conditional process model 
source("C:/Users/Eric/Documents/R/process.R") #loading Hayes 'process' funciton
## 
## ********************* PROCESS for R Version 4.3.1 ********************* 
##  
##            Written by Andrew F. Hayes, Ph.D.  www.afhayes.com              
##    Documentation available in Hayes (2022). www.guilford.com/p/hayes3   
##  
## *********************************************************************** 
##  
## PROCESS is now ready for use.
## Copyright 2020-2023 by Andrew F. Hayes ALL RIGHTS RESERVED
## Workshop schedule at http://haskayne.ucalgary.ca/CCRAM
## 
# rpe as the outcome 
process(data=train,y="AVG.RPE",x="STRESS",m="S_MSR",w="T_SM",z="T_SE",cov=c("ADULTS_NUM","MINORS_NUM", "INCOME", "SEX", "AGE"), model=75, plot =1)
## 
## ********************* PROCESS for R Version 4.3.1 ********************* 
##  
##            Written by Andrew F. Hayes, Ph.D.  www.afhayes.com              
##    Documentation available in Hayes (2022). www.guilford.com/p/hayes3   
##  
## *********************************************************************** 
##                
## Model : 75     
##     Y : AVG.RPE
##     X : STRESS 
##     M : S_MSR  
##     W : T_SM   
##     Z : T_SE   
## 
## Covariates: 
##        ADULTS_NUM MINORS_NUM INCOME SEX AGE
## 
## Sample size: 438
## 
## Random seed: 921091
## 
## 
## *********************************************************************** 
## Outcome Variable: S_MSR
## 
## Model Summary: 
##           R      R-sq       MSE         F       df1       df2         p
##      0.8599    0.7395    0.2002  121.2159   10.0000  427.0000    0.0000
## 
## Model: 
##                coeff        se         t         p      LLCI      ULCI
## constant      4.8256    0.5307    9.0933    0.0000    3.7826    5.8687
## STRESS       -0.3087    0.1422   -2.1713    0.0305   -0.5881   -0.0293
## T_SM         -0.0142    0.0260   -0.5471    0.5846   -0.0653    0.0368
## Int_1         0.0091    0.0072    1.2682    0.2054   -0.0050    0.0233
## T_SE          0.4793    0.1098    4.3650    0.0000    0.2635    0.6951
## Int_2        -0.1031    0.0305   -3.3858    0.0008   -0.1630   -0.0432
## ADULTS_NUM    0.0201    0.0300    0.6689    0.5039   -0.0389    0.0791
## MINORS_NUM   -0.0335    0.0261   -1.2828    0.2003   -0.0847    0.0178
## INCOME       -0.0000    0.0000   -1.0657    0.2872   -0.0000    0.0000
## SEX           0.0217    0.0440    0.4928    0.6224   -0.0648    0.1082
## AGE           0.0049    0.0025    1.9247    0.0549   -0.0001    0.0098
## 
## Product terms key:
## Int_1  :  STRESS  x  T_SM      
## Int_2  :  STRESS  x  T_SE      
## 
## Test(s) of highest order unconditional interaction(s):
##           R2-chng         F       df1       df2         p
## X*W        0.0010    1.6083    1.0000  427.0000    0.2054
## X*Z        0.0070   11.4638    1.0000  427.0000    0.0008
## BOTH(X)    0.0071    5.7985    2.0000  427.0000    0.0033
## ----------
## Focal predictor: STRESS (X)
##       Moderator: T_SM (W)
##       Moderator: T_SE (Z)
## 
## Conditional effects of the focal predictor at values of the moderator(s):
##        T_SM      T_SE    effect        se         t         p      LLCI
##     -2.6667    3.2941   -0.6727    0.0498  -13.5022    0.0000   -0.7706
##     -2.6667    4.4118   -0.7879    0.0406  -19.4181    0.0000   -0.8677
##     -2.6667    5.2941   -0.8789    0.0512  -17.1608    0.0000   -0.9796
##      0.0000    3.2941   -0.6483    0.0496  -13.0666    0.0000   -0.7459
##      0.0000    4.4118   -0.7636    0.0318  -24.0076    0.0000   -0.8261
##      0.0000    5.2941   -0.8545    0.0388  -22.0426    0.0000   -0.9307
##      3.6667    3.2941   -0.6148    0.0603  -10.1892    0.0000   -0.7334
##      3.6667    4.4118   -0.7300    0.0367  -19.9035    0.0000   -0.8021
##      3.6667    5.2941   -0.8210    0.0342  -24.0075    0.0000   -0.8882
##        ULCI
##     -0.5748
##     -0.7082
##     -0.7783
##     -0.5508
##     -0.7011
##     -0.7783
##     -0.4962
##     -0.6579
##     -0.7538
## 
## Data for visualizing the conditional effect of the focal predictor:
##      STRESS      T_SM      T_SE     S_MSR
##      2.4000   -2.6667    3.2941    4.9818
##      3.9000   -2.6667    3.2941    3.9727
##      4.3000   -2.6667    3.2941    3.7036
##      2.4000   -2.6667    4.4118    5.2409
##      3.9000   -2.6667    4.4118    4.0590
##      4.3000   -2.6667    4.4118    3.7438
##      2.4000   -2.6667    5.2941    5.4455
##      3.9000   -2.6667    5.2941    4.1271
##      4.3000   -2.6667    5.2941    3.7755
##      2.4000    0.0000    3.2941    5.0024
##      3.9000    0.0000    3.2941    4.0299
##      4.3000    0.0000    3.2941    3.7705
##      2.4000    0.0000    4.4118    5.2615
##      3.9000    0.0000    4.4118    4.1162
##      4.3000    0.0000    4.4118    3.8107
##      2.4000    0.0000    5.2941    5.4661
##      3.9000    0.0000    5.2941    4.1843
##      4.3000    0.0000    5.2941    3.8425
##      2.4000    3.6667    3.2941    5.0307
##      3.9000    3.6667    3.2941    4.1085
##      4.3000    3.6667    3.2941    3.8626
##      2.4000    3.6667    4.4118    5.2899
##      3.9000    3.6667    4.4118    4.1948
##      4.3000    3.6667    4.4118    3.9028
##      2.4000    3.6667    5.2941    5.4944
##      3.9000    3.6667    5.2941    4.2629
##      4.3000    3.6667    5.2941    3.9345
## 
## *********************************************************************** 
## Outcome Variable: AVG.RPE
## 
## Model Summary: 
##           R      R-sq       MSE         F       df1       df2         p
##      0.3594    0.1292    2.7315    5.7457   11.0000  426.0000    0.0000
## 
## Model: 
##                coeff        se         t         p      LLCI      ULCI
## constant     -9.0092    2.5556   -3.5253    0.0005  -14.0324   -3.9860
## STRESS        0.5374    0.1779    3.0211    0.0027    0.1878    0.8871
## S_MSR         2.1106    0.5361    3.9367    0.0001    1.0568    3.1645
## T_SM         -0.0266    0.1191   -0.2234    0.8234   -0.2607    0.2075
## Int_1         0.0211    0.0264    0.8013    0.4234   -0.0307    0.0729
## T_SE          1.8153    0.5062    3.5858    0.0004    0.8203    2.8104
## Int_2        -0.3730    0.1127   -3.3088    0.0010   -0.5947   -0.1514
## ADULTS_NUM    0.1955    0.1114    1.7557    0.0799   -0.0234    0.4144
## MINORS_NUM    0.0943    0.0959    0.9835    0.3259   -0.0942    0.2829
## INCOME       -0.0000    0.0000   -0.8162    0.4148   -0.0000    0.0000
## SEX           0.3994    0.1621    2.4631    0.0142    0.0807    0.7181
## AGE           0.0113    0.0094    1.2076    0.2279   -0.0071    0.0298
## 
## Product terms key:
## Int_1  :  S_MSR  x  T_SM      
## Int_2  :  S_MSR  x  T_SE      
## 
## Test(s) of highest order unconditional interaction(s):
##           R2-chng         F       df1       df2         p
## M*W        0.0013    0.6421    1.0000  426.0000    0.4234
## M*Z        0.0224   10.9481    1.0000  426.0000    0.0010
## BOTH(M)    0.0245    5.9873    2.0000  426.0000    0.0027
## ----------
## Focal predictor: S_MSR (M)
##       Moderator: T_SM (W)
##       Moderator: T_SE (Z)
## 
## Conditional effects of the focal predictor at values of the moderator(s):
##        T_SM      T_SE    effect        se         t         p      LLCI
##     -2.6667    3.2941    0.8254    0.2193    3.7639    0.0002    0.3944
##     -2.6667    4.4118    0.4085    0.1977    2.0662    0.0394    0.0199
##     -2.6667    5.2941    0.0794    0.2333    0.3402    0.7339   -0.3791
##      0.0000    3.2941    0.8818    0.2230    3.9534    0.0001    0.4434
##      0.0000    4.4118    0.4648    0.1783    2.6077    0.0094    0.1145
##      0.0000    5.2941    0.1357    0.2000    0.6783    0.4980   -0.2575
##      3.6667    3.2941    0.9592    0.2611    3.6741    0.0003    0.4461
##      3.6667    4.4118    0.5423    0.1946    2.7872    0.0056    0.1599
##      3.6667    5.2941    0.2131    0.1907    1.1177    0.2643   -0.1617
##        ULCI
##      1.2565
##      0.7971
##      0.5379
##      1.3202
##      0.8152
##      0.5289
##      1.4724
##      0.9247
##      0.5880
## 
## Data for visualizing the conditional effect of the focal predictor:
##       S_MSR      T_SM      T_SE   AVG.RPE
##      3.6429   -2.6667    3.2941    3.6335
##      4.1429   -2.6667    3.2941    4.0462
##      5.4114   -2.6667    3.2941    5.0934
##      3.6429   -2.6667    4.4118    4.1436
##      4.1429   -2.6667    4.4118    4.3478
##      5.4114   -2.6667    4.4118    4.8660
##      3.6429   -2.6667    5.2941    4.5462
##      4.1429   -2.6667    5.2941    4.5859
##      5.4114   -2.6667    5.2941    4.6866
##      3.6429    0.0000    3.2941    3.7678
##      4.1429    0.0000    3.2941    4.2087
##      5.4114    0.0000    3.2941    5.3273
##      3.6429    0.0000    4.4118    4.2778
##      4.1429    0.0000    4.4118    4.5102
##      5.4114    0.0000    4.4118    5.0999
##      3.6429    0.0000    5.2941    4.6805
##      4.1429    0.0000    5.2941    4.7483
##      5.4114    0.0000    5.2941    4.9205
##      3.6429    3.6667    3.2941    3.9524
##      4.1429    3.6667    3.2941    4.4320
##      5.4114    3.6667    3.2941    5.6489
##      3.6429    3.6667    4.4118    4.4624
##      4.1429    3.6667    4.4118    4.7336
##      5.4114    3.6667    4.4118    5.4215
##      3.6429    3.6667    5.2941    4.8651
##      4.1429    3.6667    5.2941    4.9717
##      5.4114    3.6667    5.2941    5.2421
## 
## *********************************************************************** 
## Bootstrapping progress:
## 
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>                                          |  33%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>                                         |  33%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>                                         |  34%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>                                         |  35%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>                                        |  35%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>                                        |  36%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>                                       |  36%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>                                       |  37%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>                                       |  38%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>                                      |  38%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>                                      |  39%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>                                      |  40%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>                                     |  40%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>                                     |  41%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>                                    |  41%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>                                    |  42%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>                                    |  43%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>                                   |  43%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>                                   |  44%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>                                  |  44%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>                                  |  45%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>                                  |  46%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                                 |  46%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                                 |  47%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                                 |  48%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                                |  48%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                                |  49%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                               |  49%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                               |  50%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                               |  51%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                              |  51%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                              |  52%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                             |  52%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                             |  53%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                             |  54%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                            |  54%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                            |  55%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                            |  56%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                           |  56%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                           |  57%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                          |  57%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                          |  58%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                          |  59%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                         |  59%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                         |  60%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                        |  60%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                        |  61%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                        |  62%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                       |  62%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                       |  63%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                       |  64%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                      |  64%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                      |  65%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                     |  65%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                     |  66%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                     |  67%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                    |  67%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                    |  68%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                    |  69%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                   |  69%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                   |  70%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                  |  70%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                  |  71%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                  |  72%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                 |  72%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                 |  73%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                |  73%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                |  74%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>                |  75%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>               |  75%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>               |  76%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>               |  77%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>              |  77%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>              |  78%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>             |  78%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>             |  79%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>             |  80%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>            |  80%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>            |  81%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>           |  81%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>           |  82%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>           |  83%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>          |  83%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>          |  84%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>          |  85%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>         |  85%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>         |  86%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>        |  86%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>        |  87%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>        |  88%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>       |  88%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>       |  89%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>       |  90%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>      |  90%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>      |  91%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>     |  91%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>     |  92%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>     |  93%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>    |  93%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>    |  94%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>   |  94%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>   |  95%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>   |  96%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>  |  96%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>  |  97%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>  |  98%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> |  98%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> |  99%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>|  99%
  |                                                                    
  |>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>| 100%
## 
## **************** DIRECT AND INDIRECT EFFECTS OF X ON Y ****************
## 
## Direct effect of X on Y:
##      effect        se         t         p      LLCI      ULCI
##      0.5374    0.1779    3.0211    0.0027    0.1878    0.8871
## 
## Conditional indirect effects of X on Y:
## 
## INDIRECT EFFECT:
## 
## STRESS    ->    S_MSR    ->    AVG.RPE
## 
##        T_SM      T_SE    Effect    BootSE  BootLLCI  BootULCI
##     -2.6667    3.2941   -0.5553    0.1697   -0.8902   -0.2371
##     -2.6667    4.4118   -0.3219    0.1897   -0.7098    0.0348
##     -2.6667    5.2941   -0.0697    0.2427   -0.5713    0.3845
##      0.0000    3.2941   -0.5717    0.1640   -0.8975   -0.2576
##      0.0000    4.4118   -0.3549    0.1635   -0.6857   -0.0471
##      0.0000    5.2941   -0.1159    0.1997   -0.5269    0.2566
##      3.6667    3.2941   -0.5898    0.1810   -0.9509   -0.2342
##      3.6667    4.4118   -0.3959    0.1608   -0.7123   -0.0897
##      3.6667    5.2941   -0.1750    0.1720   -0.5180    0.1580
## 
## 
## ******************** ANALYSIS NOTES AND ERRORS ************************ 
## 
## Level of confidence for all confidence intervals in output: 95
## 
## Number of bootstraps for percentile bootstrap confidence intervals: 5000
## 
## W values in conditional tables are the 16th, 50th, and 84th percentiles.
## 
## Z values in conditional tables are the 16th, 50th, and 84th percentiles.
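
The conditional indirect effects in the table above are the product of the moderated a-path and the moderated b-path. Below is a minimal sketch of how one cell could be reproduced by hand, assuming the model follows the standard PROCESS model 21 structure (trait self-motivation, W, moderating the STRESS -> S_MSR path and trait self-efficacy, Z, moderating the S_MSR -> AVG.RPE path); a1, a3, b1, and b3 are placeholders for the path coefficients reported in the model output earlier, not values taken from this document.

# Assuming M = i_M + a1*X + a2*W + a3*(X*W) and
#          Y = i_Y + c'*X + b1*M + b2*Z + b3*(M*Z),
# the conditional indirect effect of X on Y at (W, Z) is (a1 + a3*W)*(b1 + b3*Z)
cond_indirect <- function(a1, a3, b1, b3, W, Z) {
  (a1 + a3 * W) * (b1 + b3 * Z)
}
# Example call (placeholder coefficients; substitute the estimates from the
# model output). W and Z below are the 16th-percentile values from the table.
# cond_indirect(a1, a3, b1, b3, W = -2.6667, Z = 3.2941)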

Interaction plots

Below I produce a plot of the relationship between stress (X) and state multifactor self-regulation (M) conditional on trait self-efficacy (Z). We can see that as stress increases, state multifactor self-regulation decreases; however, this decrease appears to be less severe for individuals with higher trait self-efficacy. Thus, higher trait self-efficacy appears to buffer the negative association between stress and state multifactor self-regulation.

# plot-ready values of stress (x) and state multifactor self-regulation (m)
# at low, medium, and high trait self-efficacy (z)
x<-c(2.5,3.9,4.3,2.5,3.9,4.3,2.5,3.9,4.3)
z<-c(3.294,3.294,3.294,4.412,4.412,4.412,5.240,5.240,5.240)
m<-c(4.78,3.95,3.72,5.10,4.14,3.86,5.34,4.23,3.92)
wmarker<-c(15,15,15,16,16,16,17,17,17)# point marker for each level of z
plot(y=m,x=x,cex=1.2,pch=wmarker,xlab="Stress (X)",
ylab="State Multifactor Self-Regulation (M)")
legend.txt<-c("low","medium", "high")
legend("topright", legend = legend.txt,cex=1,lty=c(1,3,6),lwd=c(2,3,2),
pch=c(15,16,17))
# connect the points within each level of trait self-efficacy
lines(x[z==3.294],m[z==3.294],lwd=2,lty=1,col="black")
lines(x[z==4.412],m[z==4.412],lwd=3,lty=3,col="black")
lines(x[z==5.240],m[z==5.240],lwd=2,lty=6,col="black")
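
As an optional alternative (not part of the original analysis), the same interaction plot could be drawn with ggplot2, which handles the legend automatically; this sketch simply reuses the hard-coded values from the chunk above.

library(ggplot2)

plot_dat <- data.frame(
  x = c(2.5, 3.9, 4.3, 2.5, 3.9, 4.3, 2.5, 3.9, 4.3),
  m = c(4.78, 3.95, 3.72, 5.10, 4.14, 3.86, 5.34, 4.23, 3.92),
  z = factor(rep(c("low", "medium", "high"), each = 3),
             levels = c("low", "medium", "high"))# levels of trait self-efficacy
)

ggplot(plot_dat, aes(x = x, y = m, linetype = z, shape = z)) +
  geom_point(size = 2.5) +
  geom_line(linewidth = 0.8) +
  labs(x = "Stress (X)", y = "State Multifactor Self-Regulation (M)",
       linetype = "Trait Self-Efficacy (Z)", shape = "Trait Self-Efficacy (Z)") +
  theme_classic()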

Below I produce a plot of the relationship between state multifactor self-regulation (M) and average daily rate of perceived exertion (Y) conditional on trait self-efficacy (Z). We can see that as state multifactor self-regulation decreases, average rate of perceived exertion also decreases; however, this appears to hold only for those with low to medium trait self-efficacy. Thus, high trait self-efficacy appears to be associated with a more stable average daily rate of perceived exertion.

# plot-ready values of state multifactor self-regulation (m) and average daily
# rate of perceived exertion (y) at low, medium, and high trait self-efficacy (z)
m<-c(3.64,4.14,5.37,3.64,4.14,5.37,3.64,4.14,5.37)
z<-c(3.294,3.294,3.294,4.412,4.412,4.412,5.240,5.240,5.240)
y<-c(3.78,4.11,4.91,4.22,4.37,4.73,4.59,4.59,4.59)
wmarker<-c(15,15,15,16,16,16,17,17,17)# point marker for each level of z
plot(y=y,x=m,cex=1.2,pch=wmarker,xlab="State Multifactor Self-Regulation (M)",
ylab="Average Rate of Perceived Exertion (Y)")
legend.txt<-c("low","medium", "high")
legend("topleft", legend = legend.txt,cex=1,lty=c(1,3,6),lwd=c(2,3,2),
pch=c(15,16,17))
# connect the points within each level of trait self-efficacy
lines(m[z==3.294],y[z==3.294],lwd=2,lty=1,col="black")
lines(m[z==4.412],y[z==4.412],lwd=3,lty=3,col="black")
lines(m[z==5.240],y[z==5.240],lwd=2,lty=6,col="black")

Below I produce a plot of the magnitude of the indirect effect of stress (X) on average daily rate of perceived exertion (Y) through state multifactor self-regulation (M), conditional on trait self-efficacy (Z). We can see that as trait self-efficacy increases, the magnitude of the negative indirect effect of stress decreases, consistent with the buffering and stabilizing effects of trait self-efficacy described above.

# conditional indirect effect of stress on average RPE (cie) at low, medium,
# and high trait self-efficacy (z)
z<- c(3.294,4.412,5.240)
cie<- c(-.42,-.22,-.00)

# plot invisible points first to set up the axes, then draw the trend line
plot(y=cie,x=z,pch=15,col="white",
     xlab="Trait Self-Efficacy (Z)",
     ylab="Size of Negative Indirect Effect of Stress on Avg RPE")
lines(z, cie, col = "black", lwd = 2)
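
As a quick, optional descriptive check of the downward trend in the plot above (three points only, not a formal test), the approximate change in the conditional indirect effect per unit of trait self-efficacy can be obtained from the vectors defined in the previous chunk.

# approximate slope of the conditional indirect effect across the three
# plotted values of trait self-efficacy (descriptive only)
coef(lm(cie ~ z))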

Conclusions

It appears greater stress was indirectly associated with a lower average daily rate of perceived exertion through its negative association with state self-regulation. This negative indirect effect of stress appears to be conditional on trait self-efficacy. Specifically, higher trait self-efficacy tended to buffer the negative association between stress and state self-regulation, meaning state self-regulation decreased less under stress when participants had high trait self-efficacy. Furthermore, higher trait self-efficacy stabilized the positive relationship between state self-regulation and average daily RPE, meaning individuals maintained a consistently greater rate of perceived exertion when they had high trait self-efficacy. Together, this implies that higher trait self-efficacy could contribute to more consistent effort during physical activity when individuals were under greater amounts of stress.