Libraries

# install.packages(c("haven", "dplyr", "janitor"))  # if not installed
library(dplyr)
## 
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
## 
##     filter, lag
## The following objects are masked from 'package:base':
## 
##     intersect, setdiff, setequal, union
library(janitor)
## 
## Attaching package: 'janitor'
## The following objects are masked from 'package:stats':
## 
##     chisq.test, fisher.test

Data

library(haven)
d1 <- read_sav("C:/R Analysis/antu/Ai University Data SPSS(MASTER).sav")

Data Check

names(d1)
##  [1] "Name"         "Insti_name"   "Sex"          "Age"          "Degree"      
##  [6] "Study_Field"  "Internet_use" "Femiliar_Ai"  "AI1"          "AI2"         
## [11] "AI3"          "AI4"          "AI5"          "AI6"          "AI7"         
## [16] "AI8"          "SM1"          "SM2"          "SM3"          "SM4"         
## [21] "SM5"          "SM6"          "SL1"          "SL2"          "SL3"         
## [26] "SL4"          "SL5"          "AP1"          "AP2"          "AP3"         
## [31] "AP4"          "AP5"          "MWB1"         "MWB2"         "MWB3"        
## [36] "MWB4"         "MWB5"         "MWB6"
str(d1)
## tibble [391 × 38] (S3: tbl_df/tbl/data.frame)
##  $ Name        : chr [1:391] "Mahadi Hassan Antu" "Sanjida Sultana" "Fariya akter" "Yamin hasan" ...
##   ..- attr(*, "label")= chr " Name"
##   ..- attr(*, "format.spss")= chr "A47"
##   ..- attr(*, "display_width")= int 21
##  $ Insti_name  : chr [1:391] "Primeasia University" "Primeasia University" "North South University" "North South University" ...
##   ..- attr(*, "label")= chr " Institution Name"
##   ..- attr(*, "format.spss")= chr "A51"
##   ..- attr(*, "display_width")= int 37
##  $ Sex         : dbl+lbl [1:391] 1, 2, 2, 1, 2, 2, 2, 1, 2, 1, 2, 2, 2, 1, 2, 2, 2, 2, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:2] 1 2
##    .. ..- attr(*, "names")= chr [1:2] "Male" "Female"
##  $ Age         : num [1:391] 24 25 21 21 26 24 22 30 23 25 ...
##   ..- attr(*, "format.spss")= chr "F12.0"
##   ..- attr(*, "display_width")= int 12
##  $ Degree      : dbl+lbl [1:391] 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:2] 1 2
##    .. ..- attr(*, "names")= chr [1:2] "Bachelors" "Masters"
##  $ Study_Field : dbl+lbl [1:391] 2, 5, 5, 3, 2, 7, 1, 1, 2, 3, 2, 2, 2, 4, 3, 3, 4, 3, ...
##    ..@ label        : chr "Field of study"
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:7] 1 2 3 4 5 6 7
##    .. ..- attr(*, "names")= chr [1:7] "Business" "Engineering" "Science" "Social science" ...
##  $ Internet_use: dbl+lbl [1:391] 2, 1, 3, 3, 1, 2, 1, 3, 2, 2, 3, 1, 1, 1, 1, 2, 1, 2, ...
##    ..@ label        : chr "Daily Internet Usage"
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:3] 1 2 3
##    .. ..- attr(*, "names")= chr [1:3] "1-4 hours" "4-8 hours" "More than 8 hours"
##  $ Femiliar_Ai : dbl+lbl [1:391] 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
##    ..@ label        : chr "Femiliar with Ai"
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:2] 0 1
##    .. ..- attr(*, "names")= chr [1:2] "No" "Yes"
##  $ AI1         : dbl+lbl [1:391] 5, 4, 2, 4, 4, 5, 5, 5, 4, 5, 5, 1, 4, 4, 3, 5, 4, 3, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ AI2         : dbl+lbl [1:391] 4, 4, 4, 4, 4, 5, 1, 4, 3, 5, 4, 5, 3, 4, 3, 5, 2, 3, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ AI3         : dbl+lbl [1:391] 5, 3, 4, 4, 4, 5, 3, 4, 3, 5, 4, 3, 2, 4, 3, 4, 1, 4, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ AI4         : dbl+lbl [1:391] 3, 4, 4, 4, 4, 5, 2, 3, 1, 5, 5, 3, 3, 4, 3, 5, 1, 2, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ AI5         : dbl+lbl [1:391] 5, 4, 4, 4, 3, 5, 4, 4, 4, 5, 5, 4, 3, 4, 3, 4, 1, 3, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ AI6         : dbl+lbl [1:391] 5, 4, 4, 5, 4, 5, 4, 5, 4, 5, 4, 4, 5, 4, 4, 4, 1, 4, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ AI7         : dbl+lbl [1:391] 5, 4, 4, 4, 4, 5, 5, 4, 2, 5, 4, 5, 4, 4, 4, 4, 4, 3, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ AI8         : dbl+lbl [1:391] 4, 4, 4, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 4, 4, 1, 3, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ SM1         : dbl+lbl [1:391] 5, 4, 5, 5, 4, 5, 2, 4, 3, 5, 4, 3, 4, 2, 4, 3, 3, 3, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ SM2         : dbl+lbl [1:391] 4, 4, 5, 5, 3, 5, 4, 5, 1, 5, 4, 1, 5, 4, 4, 4, 4, 4, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ SM3         : dbl+lbl [1:391] 4, 4, 4, 5, 4, 4, 4, 5, 5, 5, 5, 2, 3, 4, 4, 3, 2, 2, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ SM4         : dbl+lbl [1:391] 1, 4, 3, 5, 4, 5, 2, 5, 1, 5, 4, 4, 5, 4, 4, 3, 3, 1, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ SM5         : dbl+lbl [1:391] 5, 4, 4, 4, 4, 5, 4, 4, 4, 5, 5, 4, 5, 2, 4, 4, 1, 3, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ SM6         : dbl+lbl [1:391] 5, 4, 4, 4, 4, 4, 3, 4, 2, 5, 4, 5, 4, 3, 4, 4, 3, 3, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ SL1         : dbl+lbl [1:391] 5, 4, 4, 5, 4, 5, 5, 4, 1, 5, 5, 4, 4, 4, 4, 4, 4, 4, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ SL2         : dbl+lbl [1:391] 5, 4, 4, 4, 3, 5, 4, 5, 3, 5, 4, 1, 4, 4, 4, 4, 3, 4, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ SL3         : dbl+lbl [1:391] 4, 4, 4, 5, 3, 5, 3, 4, 3, 5, 4, 1, 1, 4, 4, 4, 1, 4, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ SL4         : dbl+lbl [1:391] 5, 4, 5, 4, 4, 5, 4, 5, 3, 5, 4, 3, 3, 4, 4, 5, 4, 4, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ SL5         : dbl+lbl [1:391] 5, 4, 4, 4, 4, 5, 5, 4, 5, 5, 5, 1, 3, 4, 4, 4, 4, 3, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ AP1         : dbl+lbl [1:391] 4, 4, 5, 5, 4, 5, 3, 4, 4, 5, 4, 2, 3, 4, 4, 4, 1, 4, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ AP2         : dbl+lbl [1:391] 3, 4, 3, 5, 4, 5, 4, 4, 2, 5, 4, 4, 4, 4, 4, 4, 1, 3, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ AP3         : dbl+lbl [1:391] 4, 3, 3, 4, 4, 5, 5, 4, 1, 5, 4, 5, 4, 4, 4, 4, 1, 3, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ AP4         : dbl+lbl [1:391] 5, 3, 4, 4, 4, 5, 4, 4, 3, 5, 4, 5, 4, 4, 4, 4, 1, 3, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ AP5         : dbl+lbl [1:391] 3, 3, 4, 4, 4, 5, 4, 4, 2, 5, 4, 5, 3, 4, 4, 4, 1, 3, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ MWB1        : dbl+lbl [1:391] 5, 4, 4, 4, 4, 4, 4, 4, 4, 5, 4, 2, 4, 3, 4, 4, 1, 4, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ MWB2        : dbl+lbl [1:391] 4, 3, 4, 4, 3, 5, 3, 4, 3, 5, 4, 4, 3, 3, 4, 4, 3, 3, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ MWB3        : dbl+lbl [1:391] 5, 3, 4, 4, 4, 5, 3, 4, 3, 5, 4, 2, 4, 4, 4, 4, 1, 4, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ MWB4        : dbl+lbl [1:391] 5, 4, 3, 5, 3, 5, 4, 4, 1, 5, 5, 4, 3, 4, 4, 5, 3, 3, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ MWB5        : dbl+lbl [1:391] 2, 5, 5, 5, 3, 5, 5, 4, 1, 5, 4, 5, 4, 4, 4, 5, 3, 3, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
##  $ MWB6        : dbl+lbl [1:391] 5, 3, 5, 5, 3, 4, 4, 4, 3, 5, 4, 4, 1, 4, 4, 5, 3, 3, ...
##    ..@ format.spss  : chr "F12.0"
##    ..@ display_width: int 12
##    ..@ labels       : Named num [1:5] 1 2 3 4 5
##    .. ..- attr(*, "names")= chr [1:5] "Strongly Disagree" "Disagree" "Neutral" "Agree" ...
summary(d1)
##      Name            Insti_name             Sex             Age       
##  Length:391         Length:391         Min.   :1.000   Min.   :20.00  
##  Class :character   Class :character   1st Qu.:1.000   1st Qu.:22.00  
##  Mode  :character   Mode  :character   Median :1.000   Median :24.00  
##                                        Mean   :1.488   Mean   :23.85  
##                                        3rd Qu.:2.000   3rd Qu.:25.00  
##                                        Max.   :2.000   Max.   :45.00  
##      Degree       Study_Field     Internet_use    Femiliar_Ai    
##  Min.   :1.000   Min.   :1.000   Min.   :1.000   Min.   :0.0000  
##  1st Qu.:1.000   1st Qu.:2.000   1st Qu.:1.000   1st Qu.:1.0000  
##  Median :1.000   Median :3.000   Median :1.000   Median :1.0000  
##  Mean   :1.215   Mean   :2.977   Mean   :1.514   Mean   :0.9795  
##  3rd Qu.:1.000   3rd Qu.:4.000   3rd Qu.:2.000   3rd Qu.:1.0000  
##  Max.   :2.000   Max.   :7.000   Max.   :3.000   Max.   :1.0000  
##       AI1             AI2             AI3             AI4       
##  Min.   :1.000   Min.   :1.000   Min.   :1.000   Min.   :1.000  
##  1st Qu.:3.000   1st Qu.:3.000   1st Qu.:3.000   1st Qu.:3.000  
##  Median :4.000   Median :4.000   Median :4.000   Median :4.000  
##  Mean   :3.785   Mean   :3.754   Mean   :3.642   Mean   :3.558  
##  3rd Qu.:4.000   3rd Qu.:4.000   3rd Qu.:4.000   3rd Qu.:4.000  
##  Max.   :5.000   Max.   :5.000   Max.   :5.000   Max.   :5.000  
##       AI5             AI6             AI7             AI8            SM1       
##  Min.   :1.000   Min.   :1.000   Min.   :1.000   Min.   :1.00   Min.   :1.000  
##  1st Qu.:4.000   1st Qu.:4.000   1st Qu.:4.000   1st Qu.:3.00   1st Qu.:3.000  
##  Median :4.000   Median :4.000   Median :4.000   Median :4.00   Median :4.000  
##  Mean   :3.908   Mean   :3.972   Mean   :3.893   Mean   :3.79   Mean   :3.396  
##  3rd Qu.:4.000   3rd Qu.:5.000   3rd Qu.:4.000   3rd Qu.:4.00   3rd Qu.:4.000  
##  Max.   :5.000   Max.   :5.000   Max.   :5.000   Max.   :5.00   Max.   :5.000  
##       SM2             SM3             SM4             SM5       
##  Min.   :1.000   Min.   :1.000   Min.   :1.000   Min.   :1.000  
##  1st Qu.:3.000   1st Qu.:3.000   1st Qu.:3.000   1st Qu.:3.000  
##  Median :4.000   Median :4.000   Median :4.000   Median :4.000  
##  Mean   :3.824   Mean   :3.655   Mean   :3.417   Mean   :3.629  
##  3rd Qu.:4.000   3rd Qu.:4.000   3rd Qu.:4.000   3rd Qu.:4.000  
##  Max.   :5.000   Max.   :5.000   Max.   :5.000   Max.   :5.000  
##       SM6             SL1             SL2             SL3       
##  Min.   :1.000   Min.   :1.000   Min.   :1.000   Min.   :1.000  
##  1st Qu.:3.000   1st Qu.:4.000   1st Qu.:3.000   1st Qu.:3.000  
##  Median :4.000   Median :4.000   Median :4.000   Median :4.000  
##  Mean   :3.714   Mean   :4.092   Mean   :3.742   Mean   :3.619  
##  3rd Qu.:4.000   3rd Qu.:5.000   3rd Qu.:4.000   3rd Qu.:4.000  
##  Max.   :5.000   Max.   :5.000   Max.   :5.000   Max.   :5.000  
##       SL4             SL5             AP1             AP2       
##  Min.   :1.000   Min.   :1.000   Min.   :1.000   Min.   :1.000  
##  1st Qu.:4.000   1st Qu.:4.000   1st Qu.:3.000   1st Qu.:3.000  
##  Median :4.000   Median :4.000   Median :4.000   Median :4.000  
##  Mean   :3.997   Mean   :3.964   Mean   :3.432   Mean   :3.591  
##  3rd Qu.:5.000   3rd Qu.:5.000   3rd Qu.:4.000   3rd Qu.:4.000  
##  Max.   :5.000   Max.   :5.000   Max.   :5.000   Max.   :5.000  
##       AP3             AP4             AP5             MWB1      
##  Min.   :1.000   Min.   :1.000   Min.   :1.000   Min.   :1.000  
##  1st Qu.:3.000   1st Qu.:3.000   1st Qu.:3.000   1st Qu.:3.000  
##  Median :4.000   Median :4.000   Median :4.000   Median :4.000  
##  Mean   :3.601   Mean   :3.614   Mean   :3.586   Mean   :3.463  
##  3rd Qu.:4.000   3rd Qu.:4.000   3rd Qu.:4.000   3rd Qu.:4.000  
##  Max.   :5.000   Max.   :5.000   Max.   :5.000   Max.   :5.000  
##       MWB2            MWB3            MWB4           MWB5            MWB6      
##  Min.   :1.000   Min.   :1.000   Min.   :1.00   Min.   :1.000   Min.   :1.000  
##  1st Qu.:3.000   1st Qu.:3.000   1st Qu.:3.00   1st Qu.:3.000   1st Qu.:3.000  
##  Median :3.000   Median :4.000   Median :4.00   Median :4.000   Median :4.000  
##  Mean   :3.448   Mean   :3.698   Mean   :3.45   Mean   :3.593   Mean   :3.601  
##  3rd Qu.:4.000   3rd Qu.:4.000   3rd Qu.:4.00   3rd Qu.:4.000   3rd Qu.:4.000  
##  Max.   :5.000   Max.   :5.000   Max.   :5.00   Max.   :5.000   Max.   :5.000
library(psych)
describe(d1)

Convert SPSS labelled variables to factors

dat <- d1 %>% 
  mutate(
    across(
      .cols = c(Sex, Degree, Study_Field, Internet_use, Femiliar_Ai),
      .fns  = ~ as_factor(., levels = "labels")
    )
  )
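# Optional quick check (a sketch, not part of the original workflow):
# the SPSS value labels should now appear as factor levels.
# dat %>% count(Sex)
# dat %>% count(Internet_use)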

Create age groups to match the demographic table (Table 1)

library(labelled)  # for to_factor()
library(forcats)

d2 <- d1 %>% 
  # 1) Convert labelled vars to factors using their SPSS value labels
  mutate(
    Sex          = to_factor(Sex),          # 1 = Male, 2 = Female
    Degree       = to_factor(Degree),       # 1 = Bachelors, 2 = Masters
    Study_Field  = to_factor(Study_Field),  # 1–7 labeled in SPSS
    Internet_use = to_factor(Internet_use), # 1–4 hours, 4–8, >8
    Femiliar_Ai  = to_factor(Femiliar_Ai)   # 0 = No, 1 = Yes
  ) %>% 
  # 2) Recode study field labels if you want them exactly like the paper/table
  mutate(
    Study_Field = fct_recode(
      Study_Field,
      "Business"            = "Business",
      "Engineering"         = "Engineering",
      "Science"             = "Science",
      "Social science"      = "Social science",
      "Arts & Humanities"   = "Arts & Humanities",
      "Law"                 = "Law",
      "English"             = "English"
    ),
    # 3) Create age group variable to match the categories in your result table
    Age_cat = cut(
      Age,
      breaks = c(18, 22, 26, 30, Inf),
      right  = FALSE,
      labels = c("18–21 years", "22–25 years", "26–29 years", "More than 30 years")
    )
  )
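# Optional sanity check (sketch): every respondent should fall into one of the
# age bins; an NA count in Age_cat would signal ages below the lowest break (18).
# d2 %>% count(Age_cat)
# d2 %>% count(Study_Field)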

Table 1

library(gtsummary) 
# Select only the variables reported in Table 1
table1_data <- d2 %>% 
  select(
    Sex,
    Age_cat,
    Degree,
    Study_Field,
    Internet_use,
    Femiliar_Ai
  )

# Build Table 1
table1 <- table1_data %>%
  tbl_summary(
    # Categorical variables (default here)
    type = list(everything() ~ "categorical"),
    
    # Nice labels for the rows (like in your Word table)
    label = list(
      Sex          ~ "Gender",
      Age_cat      ~ "Age",
      Degree       ~ "Education",
      Study_Field  ~ "Academic Year / Field of study",
      Internet_use ~ "Daily internet use for academic and problem-solving purposes",
      Femiliar_Ai  ~ 'Familiar with the term "Artificial Intelligence"'
    ),
    
    # Show Frequency and Percentage
    statistic = list(
      all_categorical() ~ "{n} ({p}%)"
    ),
    
    # Round percentages to 1 decimal place
    digits = list(
      all_categorical() ~ c(0, 1)
    ),
    
    # Do not show missing category rows (if any)
    missing = "no"
  ) %>%
  # Make the header look like a standard Table 1
  modify_header(
    all_stat_cols() ~ "**Frequency (n) / Percentage (%)**"
  ) %>%
  bold_labels()

# Print the table in RStudio / Quarto / etc.
table1
Characteristic Frequency (n) / Percentage (%)¹
Gender
    Male 200 (51.2%)
    Female 191 (48.8%)
Age
    18–21 years 92 (23.5%)
    22–25 years 211 (54.0%)
    26–29 years 78 (19.9%)
    More than 30 years 10 (2.6%)
Education
    Bachelors 307 (78.5%)
    Masters 84 (21.5%)
Academic Year / Field of study
    Business 66 (16.9%)
    Engineering 70 (17.9%)
    Science 146 (37.3%)
    Social science 60 (15.3%)
    Arts & Humanities 26 (6.6%)
    Law 11 (2.8%)
    English 12 (3.1%)
Daily internet use for academic and problem-solving purposes
    1-4 hours 236 (60.4%)
    4-8 hours 109 (27.9%)
    More than 8 hours 46 (11.8%)
Familiar with the term "Artificial Intelligence"
    No 8 (2.0%)
    Yes 383 (98.0%)
¹ n (%)

Measurement Model Results

library(dplyr)
library(purrr)
library(psych)
library(lavaan)
## This is lavaan 0.6-20
## lavaan is FREE software! Please report any bugs.
## 
## Attaching package: 'lavaan'
## The following object is masked from 'package:psych':
## 
##     cor2cov
library(tibble)

# -----------------------------
# 1.1 Measurement model (if not yet run)
# -----------------------------
mm_model <- '
  AI  =~ AI1 + AI2 + AI3 + AI4 + AI5 + AI6 + AI7 + AI8
  SM  =~ SM1 + SM2 + SM3 + SM4 + SM5 + SM6
  SL  =~ SL1 + SL2 + SL3 + SL4 + SL5
  AP  =~ AP1 + AP2 + AP3 + AP4 + AP5
  MWB =~ MWB1 + MWB2 + MWB3 + MWB4 + MWB5 + MWB6
'

fit_mm <- cfa(
  model     = mm_model,
  data      = d1,
  std.lv    = TRUE,
  estimator = "MLR"
)
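# Before interpreting loadings, global fit can be checked. A minimal sketch
# (the robust indices correspond to the MLR estimator used above):
# fitMeasures(
#   fit_mm,
#   c("chisq.scaled", "df", "cfi.robust", "tli.robust", "rmsea.robust", "srmr")
# )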

# -----------------------------
# 1.2 Make numeric version of Likert items for alpha
# -----------------------------
likert_items <- c(
  paste0("AI", 1:8),
  paste0("SM", 1:6),
  paste0("SL", 1:5),
  paste0("AP", 1:5),
  paste0("MWB", 1:6)
)

d1_num <- d1 %>%
  mutate(
    across(
      all_of(likert_items),
      ~ as.numeric(unclass(.x))  # drop haven_labelled, keep 1–5
    )
  )

# -----------------------------
# 1.3 Cronbach's alpha per construct
# -----------------------------
scale_list <- list(
  AI  = paste0("AI", 1:8),
  SM  = paste0("SM", 1:6),
  SL  = paste0("SL", 1:5),
  AP  = paste0("AP", 1:5),
  MWB = paste0("MWB", 1:6)
)

alpha_tbl <- imap_dfr(
  scale_list,
  ~ tibble(
      Construct = .y,
      alpha     = psych::alpha(d1_num[, .x])$total[["raw_alpha"]]
    )
)

# -----------------------------
# 1.4 Standardized loadings from CFA
# -----------------------------
std_sol <- standardizedSolution(fit_mm)

loadings_tbl <- std_sol %>%
  filter(op == "=~") %>%
  transmute(
    Construct = lhs,
    Items     = rhs,
    FL        = est.std
  )

# -----------------------------
# 1.5 CR and AVE (correct formulas)
# -----------------------------
cr_ave_tbl <- loadings_tbl %>%
  group_by(Construct) %>%
  summarise(
    k            = n(),
    lambda_sum   = sum(FL),
    lambda_sqsum = sum(FL^2),
    theta_sum    = sum(1 - FL^2),
    CR           = (lambda_sum^2) / (lambda_sum^2 + theta_sum),
    AVE          = lambda_sqsum / k,
    .groups      = "drop"
  ) %>%
  select(Construct, CR, AVE)
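# Optional cross-check of the hand-computed CR and AVE (assumes the semTools
# package is installed; function names vary by semTools version):
# library(semTools)
# semTools::compRelSEM(fit_mm)   # composite reliability
# semTools::AVE(fit_mm)          # average variance extracted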
# Item wording taken from Ai_university_result_tables.docx
item_text <- c(
  AI1  = "AI Tools enhance learning performance.",
  AI2  = "AI Tools make learning more efficient.",
  AI3  = "AI Tools Positively impact our learning outcomes.",
  AI4  = "AI Tools make it easier for people to work together.",
  AI5  = "AI Tools Optimize the way of learning.",
  AI6  = "AI Tools are impressive with their various features and multifaceted capabilities",
  AI7  = "AI Tools are game-changers for enhancing the learning experience",
  AI8  = "AI Tools helps to ask more precise follow-up Questions",
  
  SM1  = "Social media platforms enhance the feeling of community and make learning more interactive",
  SM2  = "Social media platforms enable faster peer feedback",
  SM3  = "Social media platforms speed up instructor/teacher’s feedback on students.",
  SM4  = "Social media platforms boost class participation",
  SM5  = "Social media platforms help with multitasking during study session",
  SM6  = "Social media platforms are strong channels for effective academic communications.",
  
  SL1  = "With smart technologies, I find myself different topics.",
  SL2  = "With smart technologies, I find myself participating and engaging in group learning activity.",
  SL3  = "With smart technologies, I find myself performing better than others.",
  SL4  = "With smart technologies, I enjoy exploring new topics.",
  SL5  = "With smart technologies, I find myself enjoying discovering and learning new applications.",
  
  AP1  = "I am experiencing an increase in the amount of studying I accomplish.",
  AP2  = "I am improving the quality of my study sessions.",
  AP3  = "I am advancing toward my personal career objectives.",
  AP4  = "I am advancing in acquiring the skills necessary for my future career.",
  AP5  = "I am actively pursuing career development opportunities and making progress.",
  
  MWB1 = "I have been feeling optimistic.",
  MWB2 = "I have been feeling calm.",
  MWB3 = "I have been handling problems effectively.",
  MWB4 = "I have been thinking clearly and focusing well.",
  MWB5 = "I have been feeling connected and close to others.",
  MWB6 = "I have been able to make decisions confidently."
)
# Nice labels for construct names (like your doc)
construct_labels <- c(
  AI  = "Artificial intelligence (AI)",
  SM  = "Social media (SM)",
  SL  = "Smart learning (SL)",
  AP  = "Academic performance (AP)",
  MWB = "Mental well-being (MWB)"
)

# Merge everything into one table
table2 <- loadings_tbl %>%
  left_join(alpha_tbl,   by = "Construct") %>%
  left_join(cr_ave_tbl,  by = "Construct") %>%
  group_by(Construct) %>%
  mutate(
    # round values
    FL    = round(FL, 3),
    alpha = if_else(row_number() == 1, round(alpha, 3), NA_real_),
    CR    = if_else(row_number() == 1, round(CR, 3),    NA_real_),
    AVE   = if_else(row_number() == 1, round(AVE, 3),   NA_real_),
    # description column as in Word table (3rd column is unnamed)
    Description = item_text[Items],
    # leave Source blank for now (you can add scale references)
    Source      = ""
  ) %>%
  ungroup() %>%
  # replace construct codes with full labels
  mutate(
    Construct = construct_labels[Construct]
  ) %>%
  # order constructs like in your doc
  arrange(factor(Construct, levels = construct_labels)) %>%
  select(
    Construct,
    Items,
    Description,
    FL,
    alpha,
    CR,
    AVE,
    Source
  )

table2
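# Optional: export Table 2 for the manuscript (the file name is arbitrary)
# write.csv(table2, "table2_measurement_model.csv", row.names = FALSE)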
library(dplyr)
library(tidyr)
## 
## Attaching package: 'tidyr'
## The following objects are masked _by_ '.GlobalEnv':
## 
##     table1, table2
library(lavaan)
library(tibble)

# --------------------------
# 1. Get AVE from earlier code
# --------------------------
AVE_tbl <- cr_ave_tbl   # Construct, CR, AVE (only AVE is used below)

# Compute √AVE
sqrt_AVE <- AVE_tbl %>%
  mutate(sqrt_AVE = sqrt(AVE)) %>%
  select(Construct, sqrt_AVE)

# --------------------------
# 2. Extract latent correlations from CFA
# --------------------------
latent_corr <- lavInspect(fit_mm, "cor.lv")  # latent variable correlation matrix

latent_corr_tbl <- as.data.frame(latent_corr)
latent_corr_tbl$Construct <- rownames(latent_corr_tbl)

# Convert to tidy format
latent_corr_long <- latent_corr_tbl %>%
  relocate(Construct)

# --------------------------
# 3. Replace diagonal with √AVE
# --------------------------
constructs <- rownames(latent_corr)

# Create final Fornell–Larcker matrix
FL_matrix <- latent_corr_long

for (i in seq_along(constructs)) {
  FL_matrix[i, constructs[i]] <- sqrt_AVE$sqrt_AVE[sqrt_AVE$Construct == constructs[i]]
}

# --------------------------
# 4. Arrange and format nicely
# --------------------------
table3 <- FL_matrix %>%
  mutate(across(where(is.numeric), ~ round(.x, 3)))

table3
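# Optional: formatted version of Table 3 (gt is loaded below for Table 4)
# library(gt)
# table3 %>%
#   gt() %>%
#   tab_header(title = "Table 3. Discriminant validity (Fornell-Larcker criterion)")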
############################################
# Table 4 – Discriminant Validity (HTMT)
############################################

#---------------------------
# 1. Load packages
#---------------------------
library(dplyr)
library(tibble)
library(gt)

#---------------------------
# 2. Ensure Likert items are numeric (d1_num)
#---------------------------

# List all Likert items
likert_items <- c(
  paste0("AI", 1:8),
  paste0("SM", 1:6),
  paste0("SL", 1:5),
  paste0("AP", 1:5),
  paste0("MWB", 1:6)
)

# Create numeric version of d1 for correlation-based calculations
d1_num <- d1 %>%
  mutate(
    across(
      all_of(likert_items),
      ~ as.numeric(unclass(.x))  # drop haven_labelled, keep 1–5 codes
    )
  )

#---------------------------
# 3. Define constructs (items per latent)
#---------------------------

construct_items <- list(
  AI  = paste0("AI", 1:8),
  SM  = paste0("SM", 1:6),
  SL  = paste0("SL", 1:5),
  AP  = paste0("AP", 1:5),
  MWB = paste0("MWB", 1:6)
)

construct_names <- names(construct_items)

#---------------------------
# 4. Function to compute HTMT for a pair of constructs
#---------------------------

compute_htmt_pair <- function(df, items_a, items_b) {
  # Subset data to items from both constructs
  sub_df <- df[, c(items_a, items_b)]

  # Item–item correlation matrix
  R <- cor(sub_df, use = "pairwise.complete.obs")

  # Within-construct correlation submatrices
  Ra  <- R[items_a, items_a, drop = FALSE]
  Rb  <- R[items_b, items_b, drop = FALSE]

  # Between-construct correlations
  Rab <- R[items_a, items_b, drop = FALSE]

  # Mean of absolute within-construct correlations (upper triangle, no diagonal)
  mean_aa <- mean(abs(Ra[upper.tri(Ra)]), na.rm = TRUE)
  mean_bb <- mean(abs(Rb[upper.tri(Rb)]), na.rm = TRUE)

  # Mean of absolute between-construct correlations
  mean_ab <- mean(abs(Rab), na.rm = TRUE)

  # HTMT ratio
  htmt <- mean_ab / sqrt(mean_aa * mean_bb)
  return(htmt)
}

#---------------------------
# 5. Build HTMT matrix
#---------------------------

htmt_mat <- matrix(
  NA_real_,
  nrow = length(construct_names),
  ncol = length(construct_names),
  dimnames = list(construct_names, construct_names)
)

for (i in seq_along(construct_names)) {
  for (j in seq_along(construct_names)) {
    if (i < j) {
      htmt_val <- compute_htmt_pair(
        df      = d1_num,
        items_a = construct_items[[construct_names[i]]],
        items_b = construct_items[[construct_names[j]]]
      )
      htmt_mat[i, j] <- htmt_val
      htmt_mat[j, i] <- htmt_val  # symmetric
    }
  }
}
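# Optional cross-check (assumes the semTools package is installed): its htmt()
# function computes the same ratio directly from the measurement-model syntax.
# library(semTools)
# semTools::htmt(mm_model, data = d1_num)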

#---------------------------
# 6. Convert to Table 4 format
#---------------------------

table4 <- htmt_mat %>%
  as.data.frame() %>%
  rownames_to_column("Construct") %>%
  mutate(
    across(
      where(is.numeric),
      ~ round(.x, 3)  # round HTMT values
    )
  )

# View HTMT matrix
table4
#---------------------------
# 7. Nice formatted table (optional)
#---------------------------

table4_gt <- table4 %>%
  gt() %>%
  tab_header(
    title = "Table 4. Discriminant validity assessment through Heterotrait–Monotrait (HTMT) ratio"
  )

table4_gt
Table 4. Discriminant validity assessment through Heterotrait–Monotrait (HTMT) ratio
Construct AI SM SL AP MWB
AI NA 0.690 0.753 0.680 0.682
SM 0.690 NA 0.753 0.666 0.743
SL 0.753 0.753 NA 0.792 0.749
AP 0.680 0.666 0.792 NA 0.855
MWB 0.682 0.743 0.749 0.855 NA
########################################################
# Structural Model Figure using lavaan + lavaanPlot
########################################################

## -----------------------------------------------------
## 0. Install & load packages (run install once)
## -----------------------------------------------------
# install.packages("lavaan")
# install.packages("lavaanPlot")
# install.packages("dplyr")

library(lavaan)
library(lavaanPlot)
library(dplyr)

## -----------------------------------------------------
## 1. Specify SEM model (measurement + structural)
## -----------------------------------------------------

sem_model <- '
  ######################################################
  # Measurement model
  ######################################################
  AI  =~ AI1 + AI2 + AI3 + AI4 + AI5 + AI6 + AI7 + AI8
  SM  =~ SM1 + SM2 + SM3 + SM4 + SM5 + SM6
  SL  =~ SL1 + SL2 + SL3 + SL4 + SL5
  AP  =~ AP1 + AP2 + AP3 + AP4 + AP5
  MWB =~ MWB1 + MWB2 + MWB3 + MWB4 + MWB5 + MWB6

  ######################################################
  # Structural model (paths between latent variables)
  ######################################################
  SL  ~ AI + SM          # AI → SL, SM → SL
  AP  ~ AI + SL          # AI → AP, SL → AP
  MWB ~ AI + SM + SL     # AI, SM, SL → MWB
'

## -----------------------------------------------------
## 2. Fit SEM with lavaan
## -----------------------------------------------------

fit_sem <- sem(
  model     = sem_model,
  data      = d1,       # your data frame
  std.lv    = TRUE,     # latent variance = 1
  estimator = "MLR"     # robust ML
)

# Optional: check results
# summary(fit_sem, standardized = TRUE, fit.measures = TRUE)

## -----------------------------------------------------
## 3. Get R² for endogenous latent variables
## -----------------------------------------------------

r2 <- inspect(fit_sem, "r2")
print(r2)   # you’ll see SL, AP, MWB at the end
##   AI1   AI2   AI3   AI4   AI5   AI6   AI7   AI8   SM1   SM2   SM3   SM4   SM5 
## 0.334 0.524 0.552 0.364 0.572 0.380 0.548 0.522 0.481 0.509 0.526 0.423 0.490 
##   SM6   SL1   SL2   SL3   SL4   SL5   AP1   AP2   AP3   AP4   AP5  MWB1  MWB2 
## 0.445 0.539 0.444 0.548 0.547 0.577 0.534 0.525 0.734 0.739 0.626 0.505 0.452 
##  MWB3  MWB4  MWB5  MWB6    SL    AP   MWB 
## 0.541 0.592 0.336 0.513 0.667 0.618 0.611
## -----------------------------------------------------
## 4. Build labels (pretty names + R² for latent variables)
## -----------------------------------------------------

# Get latent and observed variable names from the model
latent_names   <- lavNames(fit_sem, type = "lv")
observed_names <- lavNames(fit_sem, type = "ov")

# Start labels list with default names
labels_list <- as.list(c(observed_names, latent_names))
names(labels_list) <- c(observed_names, latent_names)

# Overwrite labels for latent variables with nice text + R²
labels_list[["AI"]]  <- "Artificial\nIntelligence"
labels_list[["SM"]]  <- "Social\nMedia"
labels_list[["SL"]]  <- paste0("Smart Learning\nR² = ",
                               round(r2["SL"], 3))
labels_list[["AP"]]  <- paste0("Academic\nPerformance\nR² = ",
                               round(r2["AP"], 3))
labels_list[["MWB"]] <- paste0("Mental Well-being\nR² = ",
                               round(r2["MWB"], 3))

## -----------------------------------------------------
## 5. Plot structural model (similar to your Fig. 2)
## -----------------------------------------------------

lavaanPlot(
  model  = fit_sem,
  labels = labels_list,
  coefs  = TRUE,        # show coefficients on paths
  stand  = TRUE,        # standardized estimates
  covs   = FALSE,       # hide covariances (cleaner)
  stars  = "regress",   # significance stars for regressions
  sig    = 0.05,

  graph_options = list(
    rankdir = "LR"      # left-to-right layout
  ),

  node_options = list(
    shape     = "box",  # boxes for all nodes (you can switch to "circle")
    fontname  = "Helvetica",
    fontsize  = 10,
    fixedsize = "false"
  ),

  edge_options = list(
    color     = "black",
    arrowsize = 0.7,
    fontname  = "Helvetica",
    fontsize  = 9
  )
)
########################################################
# (Optional) Save figure as image — run if you want
########################################################
# library(DiagrammeRsvg)
# library(rsvg)
#
# p <- lavaanPlot(
#   model  = fit_sem,
#   labels = labels_list,
#   coefs  = TRUE,
#   stand  = TRUE,
#   covs   = FALSE,
#   stars  = "regress",
#   sig    = 0.05,
#   graph_options = list(rankdir = "LR"),
#   node_options  = list(shape = "box", fontname = "Helvetica",
#                        fontsize = 10, fixedsize = "false"),
#   edge_options  = list(color = "black", arrowsize = 0.7,
#                        fontname = "Helvetica", fontsize = 9)
# )
#
# DiagrammeRsvg::export_svg(p) |>
#   charToRaw() |>
#   rsvg::rsvg_png("structural_model.png", width = 2000, height = 1200)
############################################
# Table 5 – Structural model (lavaan)
############################################

library(dplyr)
library(tibble)
library(gt)    # optional, for nice table output

# 1) Extract structural paths from lavaan
pe <- parameterEstimates(
  fit_sem,
  standardized = TRUE,  # get std.all
  ci = TRUE             # for confidence intervals if needed
)

# Keep only regression paths between latent variables (structural paths)
struct_paths <- pe %>%
  filter(
    op == "~",
    lhs %in% c("SL", "AP", "MWB"),        # endogenous latent vars
    rhs %in% c("AI", "SM", "SL")          # predictors in your model
  ) %>%
  select(
    lhs, rhs,
    est, se, z, pvalue,
    std.all,
    ci.lower, ci.upper
  )

struct_paths
# 2) Define hypothesis mapping (adjust text if needed)
hypothesis_map <- tribble(
  ~lhs,  ~rhs, ~Hypothesis, ~Relationship,
  "SL",  "AI", "H1", "AI → SL",
  "SL",  "SM", "H2", "SM → SL",
  "AP",  "AI", "H3", "AI → AP",
  "AP",  "SL", "H4", "SL → AP",
  "MWB", "AI", "H5", "AI → MWB",
  "MWB", "SM", "H6", "SM → MWB",
  "MWB", "SL", "H7", "SL → MWB"
)

# 3) Join, round, and add decision column
table5 <- struct_paths %>%
  left_join(hypothesis_map, by = c("lhs", "rhs")) %>%
  mutate(
    Beta   = round(std.all, 3),
    t      = round(z, 3),
    p      = round(pvalue, 3),
    Decision = if_else(pvalue < 0.05, "Supported", "Not supported")
  ) %>%
  arrange(Hypothesis) %>%
  select(
    Hypothesis,
    Relationship,
    Beta,
    t,
    p,
    Decision
    # You can keep these extra columns if you want:
    # est, se, ci.lower, ci.upper
  )

table5
# 4) Optional: nice journal-style table
table5_gt <- table5 %>%
  gt() %>%
  tab_header(
    title = "Table 5. Structural Model Results"
  ) %>%
  cols_label(
    Hypothesis   = "Hypothesis",
    Relationship = "Path",
    Beta         = "Std. β",
    t            = "t-value",
    p            = "p-value",
    Decision     = "Decision"
  )

table5_gt
Table 5. Structural Model Results
Hypothesis Path Std. β t-value p-value Decision
H1 AI → SL 0.434 5.117 0.000 Supported
H2 SM → SL 0.457 4.296 0.000 Supported
H3 AI → AP 0.197 1.611 0.107 Not supported
H4 SL → AP 0.628 4.905 0.000 Supported
H5 AI → MWB 0.186 1.651 0.099 Not supported
H6 SM → MWB 0.291 3.585 0.000 Supported
H7 SL → MWB 0.381 3.003 0.003 Supported
r2 <- inspect(fit_sem, "r2")
r2
##   AI1   AI2   AI3   AI4   AI5   AI6   AI7   AI8   SM1   SM2   SM3   SM4   SM5 
## 0.334 0.524 0.552 0.364 0.572 0.380 0.548 0.522 0.481 0.509 0.526 0.423 0.490 
##   SM6   SL1   SL2   SL3   SL4   SL5   AP1   AP2   AP3   AP4   AP5  MWB1  MWB2 
## 0.445 0.539 0.444 0.548 0.547 0.577 0.534 0.525 0.734 0.739 0.626 0.505 0.452 
##  MWB3  MWB4  MWB5  MWB6    SL    AP   MWB 
## 0.541 0.592 0.336 0.513 0.667 0.618 0.611
r2_df <- tibble(
  Endogenous = c("SL", "AP", "MWB"),
  R2         = round(as.numeric(r2[c("SL", "AP", "MWB")]), 3)
)

r2_df
############################################################
# Table 5 – Structural model (direct + mediation effects)
############################################################

library(lavaan)
library(dplyr)
library(tibble)
library(gt)

############################################################
# 1. SEM with labeled paths + indirect effects
############################################################

sem_model_table5 <- '
  # -------------------------
  # Measurement model
  # -------------------------
  AI  =~ AI1 + AI2 + AI3 + AI4 + AI5 + AI6 + AI7 + AI8
  SM  =~ SM1 + SM2 + SM3 + SM4 + SM5 + SM6
  SL  =~ SL1 + SL2 + SL3 + SL4 + SL5
  AP  =~ AP1 + AP2 + AP3 + AP4 + AP5
  MWB =~ MWB1 + MWB2 + MWB3 + MWB4 + MWB5 + MWB6

  # -------------------------
  # Structural model (label paths)
  # -------------------------
  SL  ~ a1*AI + a2*SM          # AI -> SL, SM -> SL
  AP  ~ c1*AI + b1*SL          # AI -> AP, SL -> AP
  MWB ~ c2*AI + c3*SM + b2*SL  # AI, SM, SL -> MWB

  # -------------------------
  # Indirect (mediation) effects via SL
  # -------------------------
  ind_AI_AP   := a1*b1   # AI -> SL -> AP
  ind_AI_MWB  := a1*b2   # AI -> SL -> MWB
  ind_SM_AP   := a2*b1   # SM -> SL -> AP
  ind_SM_MWB  := a2*b2   # SM -> SL -> MWB
'

############################################################
# 2. Fit model  (ML + bootstrap for indirect effects)
############################################################

fit_table5 <- sem(
  model     = sem_model_table5,
  data      = d1,
  std.lv    = TRUE,
  estimator = "ML",          # <- key change (not MLR)
  se        = "bootstrap",   # bootstrap SE for indirects
  bootstrap = 2000
)

# Optional: check fit
# summary(fit_table5, standardized = TRUE, fit.measures = TRUE)
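# Optional (sketch): because se = "bootstrap", bias-corrected bootstrap CIs for
# the defined indirect effects can be requested directly from lavaan.
# parameterEstimates(fit_table5, boot.ci.type = "bca.simple") %>%
#   filter(op == ":=") %>%
#   select(lhs, est, ci.lower, ci.upper)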

############################################################
# 3. Extract standardized parameter estimates
############################################################

pe <- parameterEstimates(
  fit_table5,
  standardized = TRUE,
  ci = TRUE
)

# Direct structural paths (between latent variables)
direct_paths <- pe %>%
  filter(
    op == "~",
    lhs %in% c("SL", "AP", "MWB"),
    rhs %in% c("AI", "SM", "SL")
  ) %>%
  transmute(
    lhs, rhs,
    beta   = std.all,   # standardized β
    se     = se,
    t      = z,
    p      = pvalue
  )

# Indirect (mediation) paths — the := effects we defined
indirect_paths <- pe %>%
  filter(op == ":=") %>%
  transmute(
    name   = lhs,
    beta   = std.all,
    se     = se,
    t      = z,
    p      = pvalue
  )

############################################################
# 4. Map to relationships text (like your Word table)
############################################################

## ---- Direct effects block

# Note: only relationships actually estimated in sem_model_table5 appear after
# the join below; the model has no SM -> AP path, so the H3 row is not matched.
direct_map <- tribble(
  ~lhs,  ~rhs, ~Relationship,
  "AP",  "AI", "Artificial intelligence \u2192 Academic performance (H1)",
  "MWB", "AI", "Artificial intelligence \u2192 Mental well-being (H2)",
  "AP",  "SM", "Social media \u2192 Academic performance (H3)",
  "MWB", "SM", "Social media \u2192 Mental well-being (H4)",
  "SL",  "AI", "Artificial intelligence \u2192 Smart learning",
  "SL",  "SM", "Social media \u2192 Smart learning",
  "AP",  "SL", "Smart learning \u2192 Academic performance",
  "MWB", "SL", "Smart learning \u2192 Mental well-being"
)

direct_table <- direct_paths %>%
  left_join(direct_map, by = c("lhs", "rhs")) %>%
  mutate(
    Section   = "Direct effect",
    beta      = round(beta, 3),
    se        = round(se, 3),
    t         = round(t, 3),
    p         = round(p, 3),
    Outcome   = if_else(p < 0.05, "Supported", "Not supported")
  ) %>%
  select(
    Section,
    Relationship,
    beta,
    se,
    t,
    p,
    Outcome
  )

## ---- Mediation effects block

indirect_map <- tribble(
  ~name,        ~Relationship,
  "ind_AI_AP",  "Artificial intelligence \u2192 Smart learning \u2192 Academic performance (H5a)",
  "ind_AI_MWB", "Artificial intelligence \u2192 Smart learning \u2192 Mental well-being (H5b)",
  "ind_SM_AP",  "Social media \u2192 Smart learning \u2192 Academic performance (H6a)",
  "ind_SM_MWB", "Social media \u2192 Smart learning \u2192 Mental well-being (H6b)"
)

indirect_table <- indirect_paths %>%
  left_join(indirect_map, by = "name") %>%
  mutate(
    Section   = "Mediation effect",
    beta      = round(beta, 3),
    se        = round(se, 3),
    t         = round(t, 3),
    p         = round(p, 3),
    Outcome   = if_else(p < 0.05, "Supported", "Not supported")
  ) %>%
  select(
    Section,
    Relationship,
    beta,
    se,
    t,
    p,
    Outcome
  )

############################################################
# 5. Combine into final Table 5
############################################################

table5 <- bind_rows(direct_table, indirect_table) %>%
  arrange(match(Section, c("Direct effect", "Mediation effect")))

table5
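# Optional: journal-style formatting of the combined table, mirroring table5_gt above
# table5 %>%
#   gt() %>%
#   tab_header(title = "Table 5. Direct and mediation effects") %>%
#   cols_label(
#     Relationship = "Path",
#     beta         = "Std. β",
#     se           = "SE",
#     t            = "t-value",
#     p            = "p-value",
#     Outcome      = "Decision"
#   )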