Data analyses for S2 main CAM study

Author

Julius Fenn, Louisa Estadieu

Notes

load cleaned data files

# sets the directory of location of this script as the current directory
# setwd(dirname(rstudioapi::getSourceEditorContext()$path))

### load packages
require(pacman)
p_load('tidyverse', 'jsonlite', 'magrittr', 'xlsx',
       'stargazer', 'psych', 'jtools', 'DT', 'ggstatsplot', 
       'lavaan', 
       'regsem', 'MplusAutomation', 'igraph')


setwd("outputs/01_dataPreperation/final")


### load questionnaire
questionnaire <- readRDS(file = "questionnaire_final.rds")
questionnaireCAMs <- readRDS(file = "questionnaireCAMs_final.rds")


networkIndicators_pre <- readRDS(file = "networkIndicators_pre_final.rds")
networkIndicators_post <- readRDS(file = "networkIndicators_post_final.rds")


CAMfiles_combined <- readRDS(file = "CAMfiles_combined_final.rds")


### load functions
print(getwd())
[1] "C:/DATEN/PHD/Article_SoftRobotIntervention/Analyses/main study/outputs/01_dataPreperation/final"
setwd("../../../../functions")
for(i in 1:length(dir())){
  # print(dir()[i])
  source(dir()[i], encoding = "utf-8")
}


setwd("../functions_CAMapp")
for(i in 1:length(dir())){
  # print(dir()[i])
  source(dir()[i], encoding = "utf-8")
}
rm(i)
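The two sourcing loops above can also be written without an explicit index; an equivalent sketch (not evaluated here), assuming each functions folder contains only .R files:

### sketch: source every function file of the current folder in one call
invisible(lapply(list.files(pattern = "\\.[rR]$", full.names = TRUE),
                 source, encoding = "utf-8"))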



### summary function
data_summary <- function(data, varname, groupnames){
  require(plyr)
  summary_func <- function(x, col){
    c(mean = mean(x[[col]], na.rm=TRUE),
      se = sd(x[[col]], na.rm=TRUE) / sqrt(sum(!is.na(x[[col]]))))
  }
  data_sum<-ddply(data, groupnames, .fun=summary_func,
                  varname)
  data_sum <- plyr::rename(data_sum, c("mean" = varname))
  return(data_sum)
}
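data_summary() is not called in this section; a usage sketch (not evaluated here), assuming the socio_age and choosen_Robot columns used in the descriptive analyses further below:

### sketch: mean age and its standard error per chosen robot
data_summary(data = questionnaireCAMs,
             varname = "socio_age",
             groupnames = c("choosen_Robot"))
# returns one row per robot with the group mean (column socio_age) and its standard error (column se)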

CAM dataset of drawn concepts to translate

tmp_nodes <- CAMfiles_combined[[1]]
tmp_nodes$text_summarized_noSuffix <- tmp_nodes$text_summarized
tmp_nodes$text_summarized_noSuffix <- str_remove(string = tmp_nodes$text_summarized, pattern = "_positive$|_negative$|_neutral$|_ambivalent$")

length(unique(tmp_nodes$CAM))
[1] 432
sort(table(tmp_nodes$text_summarized_noSuffix))

                      AP                       LC                     RCPN 
                      96                      107                      173 
                      AN                       HC                      RCA 
                     176                      189                      195 
Soziale Assistenzroboter                        T                     HRIP 
                     198                      209                      222 
                    HRIN          Rettungsroboter                        R 
                     229                      234                      244 
                     RCN                     RCPP                       TL 
                     248                      256                      265 
                     SIN                      SIP                Nachteile 
                     401                      404                      432 
                Vorteile                       MT                       TP 
                     432                      447                      604 
                      SA 
                     643 
dim(tmp_nodes)
[1] 6404   16
setwd("outputs/02_dataAnalysis")
xlsx::write.xlsx2(x = tmp_nodes, file = "drawnConcepts.xlsx")

check single scales and compute mean variables

Almere

Anxiety dimension

regEx <- "^Almere.*anx$"
nameScale <- "Almere - Anxiety"
nameVariable <- "mean_AlmereAnxiety"

### number of items
sum(str_detect(string = colnames(questionnaireCAMs), pattern = regEx))
[1] 4
### get correlation plot, descriptives, EFA, CFA

### EFA
tmp <- CFAstats(dataset = questionnaireCAMs, regularExp = regEx, labelLatent = str_remove(string = nameVariable, pattern = "mean_"), 
                showPlots = TRUE, 
                computeEFA = TRUE, 
                computeCFA = TRUE, 
                computeCFAMplus = FALSE)



descriptive statistics: 
            Mean   SD Median CoeffofVariation Minimum Maximum Lower Quantile
Almere-3anx 2.09 1.05      2             0.50       1       5              1
Almere-1anx 2.78 1.21      3             0.43       1       5              1
Almere-4anx 2.18 1.00      2             0.46       1       5              1
Almere-2anx 2.74 1.26      3             0.46       1       5              1
            Upper Quantile Skewness Kurtosis(-3) KS-Test
Almere-3anx              5     0.74        -0.26       0
Almere-1anx              5     0.00        -1.21       0
Almere-4anx              5     0.60        -0.41       0
Almere-2anx              5     0.07        -1.23       0


variables under investigation:  Almere3anx Almere1anx Almere4anx Almere2anx 

Cronbach's Alpha: 0.76 

Parallel analysis suggests that the number of factors =  2  and the number of components =  2 
AlmereAnxiety 
Number of components:  2 

KMO criterion is too low (< .6) for: 
 Almere1anx Almere4anx Almere2anx 
 mean KMO: 0.59 


EFA factor loadings (1 factor solution): 

Loadings:
           MR1  
Almere3anx 0.673
Almere1anx 0.754
Almere4anx 0.667
Almere2anx 0.727

                 MR1
SS loadings    1.995
Proportion Var 0.499
CFA summary and fit statistics: 
lavaan 0.6.17 ended normally after 22 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                         8

  Number of observations                           216

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                               173.664     114.866
  Degrees of freedom                                 2           2
  P-value (Chi-square)                           0.000       0.000
  Scaling correction factor                                  1.512
    Yuan-Bentler correction (Mplus variant)                       

Model Test Baseline Model:

  Test statistic                               370.351     227.540
  Degrees of freedom                                 6           6
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.628

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.529       0.491
  Tucker-Lewis Index (TLI)                      -0.413      -0.528
                                                                  
  Robust Comparative Fit Index (CFI)                         0.527
  Robust Tucker-Lewis Index (TLI)                           -0.420

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -1225.940   -1225.940
  Scaling correction factor                                  1.149
      for the MLR correction                                      
  Loglikelihood unrestricted model (H1)      -1139.108   -1139.108
  Scaling correction factor                                  1.221
      for the MLR correction                                      
                                                                  
  Akaike (AIC)                                2467.880    2467.880
  Bayesian (BIC)                              2494.882    2494.882
  Sample-size adjusted Bayesian (SABIC)       2469.531    2469.531

Root Mean Square Error of Approximation:

  RMSEA                                          0.630       0.511
  90 Percent confidence interval - lower         0.553       0.448
  90 Percent confidence interval - upper         0.711       0.577
  P-value H_0: RMSEA <= 0.050                    0.000       0.000
  P-value H_0: RMSEA >= 0.080                    1.000       1.000
                                                                  
  Robust RMSEA                                               0.628
  90 Percent confidence interval - lower                     0.534
  90 Percent confidence interval - upper                     0.729
  P-value H_0: Robust RMSEA <= 0.050                         0.000
  P-value H_0: Robust RMSEA >= 0.080                         1.000

Standardized Root Mean Square Residual:

  SRMR                                           0.194       0.194

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  AlmereAnxiety =~                                                      
    Almere3anx        1.000                               0.856    0.817
    Almere1anx        0.608    0.162    3.760    0.000    0.521    0.431
    Almere4anx        0.952    0.080   11.918    0.000    0.815    0.820
    Almere2anx        0.612    0.168    3.642    0.000    0.524    0.418

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .Almere3anx        0.366    0.098    3.744    0.000    0.366    0.333
   .Almere1anx        1.186    0.126    9.430    0.000    1.186    0.814
   .Almere4anx        0.323    0.073    4.399    0.000    0.323    0.327
   .Almere2anx        1.297    0.140    9.235    0.000    1.297    0.826
    AlmereAnxiety     0.733    0.117    6.248    0.000    1.000    1.000



CFA first 6 Modification Indices: 
          lhs op        rhs      mi    epc sepc.lv sepc.all sepc.nox
14 Almere1anx ~~ Almere2anx 129.321  1.016   1.016    0.819    0.819
11 Almere3anx ~~ Almere4anx 129.320  2.600   2.600    7.568    7.568
15 Almere4anx ~~ Almere2anx  13.246 -0.302  -0.302   -0.467   -0.467
10 Almere3anx ~~ Almere1anx  13.245 -0.316  -0.316   -0.479   -0.479
13 Almere1anx ~~ Almere4anx   9.696 -0.257  -0.257   -0.416   -0.416
12 Almere3anx ~~ Almere2anx   9.696 -0.272  -0.272   -0.394   -0.394
### variable mean
questionnaireCAMs[[nameVariable]]  <- questionnaireCAMs %>%
  select(matches(regEx)) %>%
  rowMeans(na.rm = TRUE)
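For transparency, the one-factor CFA summarised above can be reproduced directly with lavaan; a sketch (not evaluated here), using the item names as printed in the output (the raw column names may differ, e.g. contain hyphens):

### sketch: standalone lavaan specification of the one-factor model reported by CFAstats()
model_anxiety <- 'AlmereAnxiety =~ Almere3anx + Almere1anx + Almere4anx + Almere2anx'
fit_anxiety <- cfa(model = model_anxiety, data = questionnaireCAMs,
                   estimator = "MLR") # MLR assumed from the robust (Yuan-Bentler) statistics above
summary(fit_anxiety, fit.measures = TRUE, standardized = TRUE)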

Attitude dimension

regEx <- "^Almere.*att$"
nameScale <- "Almere - Attitude"
nameVariable <- "mean_AlmereAttitude"

### number of items
sum(str_detect(string = colnames(questionnaireCAMs), pattern = regEx))
[1] 3
### get correlation plot, descriptives, EFA, CFA

### EFA
tmp <- CFAstats(dataset = questionnaireCAMs, regularExp = regEx, labelLatent = str_remove(string = nameVariable, pattern = "mean_"), 
                showPlots = TRUE, 
                computeEFA = TRUE, 
                computeCFA = TRUE, 
                computeCFAMplus = FALSE)



descriptive statistics: 
            Mean   SD Median CoeffofVariation Minimum Maximum Lower Quantile
Almere-1att 4.06 0.78      4             0.19       1       5              1
Almere-3att 4.35 0.70      4             0.16       1       5              1
Almere-2att 3.73 0.91      4             0.24       1       5              1
            Upper Quantile Skewness Kurtosis(-3) KS-Test
Almere-1att              5    -0.92         1.56       0
Almere-3att              5    -1.10         2.07       0
Almere-2att              5    -0.76         0.47       0


variables under investigation:  Almere1att Almere3att Almere2att 

Cronbach's Alpha: 0.73 

Parallel analysis suggests that the number of factors =  1  and the number of components =  1 
AlmereAttitude 
Number of components:  1 

KMO criterion is too low (< .6) for: 
 Almere1att 
 mean KMO: 0.62 


EFA factor loadings (1 factor solution): 

Loadings:
           MR1  
Almere1att 1.000
Almere3att 0.681
Almere2att 0.570

                 MR1
SS loadings    1.788
Proportion Var 0.596
CFA summary and fit statistics: 
lavaan 0.6.17 ended normally after 19 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                         6

  Number of observations                           216

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                                 0.000       0.000
  Degrees of freedom                                 0           0

Model Test Baseline Model:

  Test statistic                               154.233     148.594
  Degrees of freedom                                 3           3
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.038

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    1.000       1.000
  Tucker-Lewis Index (TLI)                       1.000       1.000
                                                                  
  Robust Comparative Fit Index (CFI)                            NA
  Robust Tucker-Lewis Index (TLI)                               NA

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)               -689.854    -689.854
  Loglikelihood unrestricted model (H1)       -689.854    -689.854
                                                                  
  Akaike (AIC)                                1391.707    1391.707
  Bayesian (BIC)                              1411.959    1411.959
  Sample-size adjusted Bayesian (SABIC)       1392.946    1392.946

Root Mean Square Error of Approximation:

  RMSEA                                          0.000          NA
  90 Percent confidence interval - lower         0.000          NA
  90 Percent confidence interval - upper         0.000          NA
  P-value H_0: RMSEA <= 0.050                       NA          NA
  P-value H_0: RMSEA >= 0.080                       NA          NA
                                                                  
  Robust RMSEA                                               0.000
  90 Percent confidence interval - lower                     0.000
  90 Percent confidence interval - upper                     0.000
  P-value H_0: Robust RMSEA <= 0.050                            NA
  P-value H_0: Robust RMSEA >= 0.080                            NA

Standardized Root Mean Square Residual:

  SRMR                                           0.000       0.000

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian

Latent Variables:
                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  AlmereAttitude =~                                                      
    Almere1att         1.000                               0.739    0.945
    Almere3att         0.571    0.112    5.123    0.000    0.422    0.605
    Almere2att         0.673    0.117    5.734    0.000    0.498    0.551

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .Almere1att        0.066    0.083    0.794    0.427    0.066    0.107
   .Almere3att        0.309    0.052    5.937    0.000    0.309    0.634
   .Almere2att        0.569    0.068    8.386    0.000    0.569    0.697
    AlmereAttitude    0.546    0.108    5.057    0.000    1.000    1.000



CFA first 6 Modification Indices: 
[1] lhs      op       rhs      mi       epc      sepc.lv  sepc.all sepc.nox
<0 rows> (or 0-length row.names)
### variable mean
questionnaireCAMs[[nameVariable]]  <- questionnaireCAMs %>%
  select(matches(regEx)) %>%
  rowMeans(na.rm = TRUE)

Li & Wang (2021)

Anthropomorphism dimension

regEx <- "^LiWang.*anthropomorphism$"
nameScale <- "LiWang - Anthropomorphism"
nameVariable <- "mean_LiWangAnthropomorphism"

### number of items
sum(str_detect(string = colnames(questionnaireCAMs), pattern = regEx))
[1] 5
### get correlation plot, descriptives, EFA, CFA

### EFA
tmp <- CFAstats(dataset = questionnaireCAMs, regularExp = regEx, labelLatent = str_remove(string = nameVariable, pattern = "mean_"), 
                showPlots = TRUE, 
                computeEFA = TRUE, 
                computeCFA = TRUE, 
                computeCFAMplus = FALSE)



descriptive statistics: 
                         Mean   SD Median CoeffofVariation Minimum Maximum
LiWang-5anthropomorphism 3.07 1.68      3             0.55       1       7
LiWang-3anthropomorphism 2.21 1.28      2             0.58       1       6
LiWang-2anthropomorphism 3.12 1.62      3             0.52       1       7
LiWang-1anthropomorphism 2.30 1.42      2             0.62       1       6
LiWang-4anthropomorphism 2.75 1.53      2             0.55       1       7
                         Lower Quantile Upper Quantile Skewness Kurtosis(-3)
LiWang-5anthropomorphism              1              7     0.36        -0.82
LiWang-3anthropomorphism              1              6     0.78        -0.37
LiWang-2anthropomorphism              1              7     0.20        -1.00
LiWang-1anthropomorphism              1              6     0.89        -0.24
LiWang-4anthropomorphism              1              7     0.41        -0.96
                         KS-Test
LiWang-5anthropomorphism       0
LiWang-3anthropomorphism       0
LiWang-2anthropomorphism       0
LiWang-1anthropomorphism       0
LiWang-4anthropomorphism       0


variables under investigation:  LiWang5anthropomorphism LiWang3anthropomorphism LiWang2anthropomorphism LiWang1anthropomorphism LiWang4anthropomorphism 

Cronbach's Alpha: 0.82 
Error in if (any(lower > upper)) stop("lower>upper integration limits") : 
  missing value where TRUE/FALSE needed
[1] "using Pearson correlations instead of polychoric correlations"

Parallel analysis suggests that the number of factors =  1  and the number of components =  1 
LiWangAnthropomorphism 
Number of components:  1 

Error in if (any(lower > upper)) stop("lower>upper integration limits") : 
  missing value where TRUE/FALSE needed
[1] "using Pearson correlations instead of polychoric correlations"


EFA factor loadings (1 factor solution): 

Loadings:
                        MR1  
LiWang5anthropomorphism 0.534
LiWang3anthropomorphism 0.828
LiWang2anthropomorphism 0.519
LiWang1anthropomorphism 0.847
LiWang4anthropomorphism 0.763

                 MR1
SS loadings    2.541
Proportion Var 0.508
CFA summary and fit statistics: 
lavaan 0.6.17 ended normally after 28 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        10

  Number of observations                           216

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                                18.074      12.462
  Degrees of freedom                                 5           5
  P-value (Chi-square)                           0.003       0.029
  Scaling correction factor                                  1.450
    Yuan-Bentler correction (Mplus variant)                       

Model Test Baseline Model:

  Test statistic                               431.294     246.390
  Degrees of freedom                                10          10
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.750

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.969       0.968
  Tucker-Lewis Index (TLI)                       0.938       0.937
                                                                  
  Robust Comparative Fit Index (CFI)                         0.974
  Robust Tucker-Lewis Index (TLI)                            0.948

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -1760.394   -1760.394
  Scaling correction factor                                  1.356
      for the MLR correction                                      
  Loglikelihood unrestricted model (H1)      -1751.357   -1751.357
  Scaling correction factor                                  1.388
      for the MLR correction                                      
                                                                  
  Akaike (AIC)                                3540.788    3540.788
  Bayesian (BIC)                              3574.541    3574.541
  Sample-size adjusted Bayesian (SABIC)       3542.852    3542.852

Root Mean Square Error of Approximation:

  RMSEA                                          0.110       0.083
  90 Percent confidence interval - lower         0.058       0.035
  90 Percent confidence interval - upper         0.167       0.132
  P-value H_0: RMSEA <= 0.050                    0.031       0.112
  P-value H_0: RMSEA >= 0.080                    0.847       0.591
                                                                  
  Robust RMSEA                                               0.100
  90 Percent confidence interval - lower                     0.030
  90 Percent confidence interval - upper                     0.171
  P-value H_0: Robust RMSEA <= 0.050                         0.101
  P-value H_0: Robust RMSEA >= 0.080                         0.733

Standardized Root Mean Square Residual:

  SRMR                                           0.038       0.038

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian

Latent Variables:
                            Estimate  Std.Err  z-value  P(>|z|)   Std.lv
  LiWangAnthropomorphism =~                                             
    LWng5nthrpmrph             1.000                               0.853
    LWng3nthrpmrph             1.275    0.158    8.074    0.000    1.088
    LWng2nthrpmrph             0.966    0.157    6.165    0.000    0.824
    LWng1nthrpmrph             1.443    0.188    7.658    0.000    1.231
    LWng4nthrpmrph             1.302    0.161    8.089    0.000    1.111
  Std.all
         
    0.510
    0.852
    0.508
    0.869
    0.729

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .LWng5nthrpmrph    2.072    0.250    8.297    0.000    2.072    0.740
   .LWng3nthrpmrph    0.447    0.101    4.425    0.000    0.447    0.274
   .LWng2nthrpmrph    1.949    0.205    9.496    0.000    1.949    0.741
   .LWng1nthrpmrph    0.490    0.112    4.391    0.000    0.490    0.244
   .LWng4nthrpmrph    1.089    0.198    5.515    0.000    1.089    0.469
    LWngAnthrpmrph    0.728    0.180    4.034    0.000    1.000    1.000



CFA first 6 Modification Indices: 
                       lhs op                     rhs     mi    epc sepc.lv
17 LiWang3anthropomorphism ~~ LiWang1anthropomorphism 10.596  0.381   0.381
14 LiWang5anthropomorphism ~~ LiWang1anthropomorphism  9.066 -0.301  -0.301
15 LiWang5anthropomorphism ~~ LiWang4anthropomorphism  8.305  0.334   0.334
16 LiWang3anthropomorphism ~~ LiWang2anthropomorphism  6.031 -0.214  -0.214
21 LiWang1anthropomorphism ~~ LiWang4anthropomorphism  1.804 -0.144  -0.144
18 LiWang3anthropomorphism ~~ LiWang4anthropomorphism  1.770 -0.125  -0.125
   sepc.all sepc.nox
17    0.814    0.814
14   -0.299   -0.299
15    0.222    0.222
16   -0.230   -0.230
21   -0.197   -0.197
18   -0.179   -0.179
### variable mean
questionnaireCAMs[[nameVariable]]  <- questionnaireCAMs %>%
  select(matches(regEx)) %>%
  rowMeans(na.rm = TRUE)
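The polychoric errors printed in this block indicate that the parallel analysis fell back to Pearson correlations; an explicit sketch of that fallback (not evaluated here):

### sketch: Pearson-based parallel analysis, mirroring the fallback reported above
items_anthro <- questionnaireCAMs %>%
  select(matches("^LiWang.*anthropomorphism$"))
psych::fa.parallel(items_anthro, fa = "both", cor = "cor")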

Autonomy dimension

regEx <- "^LiWang.*autonomy$"
nameScale <- "LiWang - Autonomy"
nameVariable <- "mean_LiWangAutonomy"

### number of items
sum(str_detect(string = colnames(questionnaireCAMs), pattern = regEx))
[1] 3
### get correlation plot, descriptives, EFA, CFA

### EFA
tmp <- CFAstats(dataset = questionnaireCAMs, regularExp = regEx, labelLatent = str_remove(string = nameVariable, pattern = "mean_"), 
                showPlots = TRUE, 
                computeEFA = TRUE, 
                computeCFA = TRUE, 
                computeCFAMplus = FALSE)



descriptive statistics: 
                 Mean   SD Median CoeffofVariation Minimum Maximum
LiWang-3autonomy 5.31 1.30      6             0.25       1       7
LiWang-1autonomy 5.59 1.31      6             0.23       1       7
LiWang-2autonomy 4.29 1.57      5             0.37       1       7
                 Lower Quantile Upper Quantile Skewness Kurtosis(-3) KS-Test
LiWang-3autonomy              1              7    -1.02         1.15       0
LiWang-1autonomy              1              7    -1.43         2.66       0
LiWang-2autonomy              1              7    -0.48        -0.52       0


variables under investigation:  LiWang3autonomy LiWang1autonomy LiWang2autonomy 

Cronbach's Alpha: 0.66 

Parallel analysis suggests that the number of factors =  1  and the number of components =  1 
LiWangAutonomy 
Number of components:  1 



EFA factor loadings (1 factor solution): 

Loadings:
                MR1  
LiWang3autonomy 0.710
LiWang1autonomy 0.539
LiWang2autonomy 0.686

                 MR1
SS loadings    1.266
Proportion Var 0.422
CFA summary and fit statistics: 
lavaan 0.6.17 ended normally after 24 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                         6

  Number of observations                           216

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                                 0.000       0.000
  Degrees of freedom                                 0           0

Model Test Baseline Model:

  Test statistic                                92.541      56.237
  Degrees of freedom                                 3           3
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.646

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    1.000       1.000
  Tucker-Lewis Index (TLI)                       1.000       1.000
                                                                  
  Robust Comparative Fit Index (CFI)                            NA
  Robust Tucker-Lewis Index (TLI)                               NA

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -1084.868   -1084.868
  Loglikelihood unrestricted model (H1)      -1084.868   -1084.868
                                                                  
  Akaike (AIC)                                2181.737    2181.737
  Bayesian (BIC)                              2201.988    2201.988
  Sample-size adjusted Bayesian (SABIC)       2182.975    2182.975

Root Mean Square Error of Approximation:

  RMSEA                                          0.000          NA
  90 Percent confidence interval - lower         0.000          NA
  90 Percent confidence interval - upper         0.000          NA
  P-value H_0: RMSEA <= 0.050                       NA          NA
  P-value H_0: RMSEA >= 0.080                       NA          NA
                                                                  
  Robust RMSEA                                               0.000
  90 Percent confidence interval - lower                     0.000
  90 Percent confidence interval - upper                     0.000
  P-value H_0: Robust RMSEA <= 0.050                            NA
  P-value H_0: Robust RMSEA >= 0.080                            NA

Standardized Root Mean Square Residual:

  SRMR                                           0.000       0.000

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian

Latent Variables:
                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  LiWangAutonomy =~                                                      
    LiWang3autonmy     1.000                               0.886    0.682
    LiWang1autonmy     0.821    0.179    4.589    0.000    0.728    0.556
    LiWang2autonmy     1.146    0.269    4.263    0.000    1.016    0.648

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .LiWang3autonmy    0.901    0.269    3.353    0.001    0.901    0.534
   .LiWang1autonmy    1.184    0.193    6.144    0.000    1.184    0.691
   .LiWang2autonmy    1.425    0.244    5.827    0.000    1.425    0.580
    LiWangAutonomy    0.785    0.281    2.797    0.005    1.000    1.000



CFA first 6 Modification Indices: 
[1] lhs      op       rhs      mi       epc      sepc.lv  sepc.all sepc.nox
<0 rows> (or 0-length row.names)
### variable mean
questionnaireCAMs[[nameVariable]]  <- questionnaireCAMs %>%
  select(matches(regEx)) %>%
  rowMeans(na.rm = TRUE)

General Attitudes Towards Robots Scale, GAToRS (2022)

Personal Level Positive Attitude

regEx <- "^GAToRS.*pp$"
nameScale <- "GAToRS - PP"
nameVariable <- "mean_GAToRSpp"

### number of items
sum(str_detect(string = colnames(questionnaireCAMs), pattern = regEx))
[1] 5
### get correlation plot, descriptives, EFA, CFA

### EFA
tmp <- CFAstats(dataset = questionnaireCAMs, regularExp = regEx, labelLatent = str_remove(string = nameVariable, pattern = "mean_"), 
                showPlots = TRUE, 
                computeEFA = TRUE, 
                computeCFA = TRUE, 
                computeCFAMplus = FALSE)



descriptive statistics: 
           Mean   SD Median CoeffofVariation Minimum Maximum Lower Quantile
GAToRS-4pp 4.27 1.51      4             0.35       1       7              1
GAToRS-3pp 4.18 1.32      4             0.32       1       7              1
GAToRS-5pp 4.04 1.63      4             0.40       1       7              1
GAToRS-2pp 4.41 1.42      5             0.32       1       7              1
GAToRS-1pp 4.30 1.29      4             0.30       1       7              1
           Upper Quantile Skewness Kurtosis(-3) KS-Test
GAToRS-4pp              7    -0.19        -0.66       0
GAToRS-3pp              7    -0.54         0.35       0
GAToRS-5pp              7    -0.21        -0.77       0
GAToRS-2pp              7    -0.37        -0.44       0
GAToRS-1pp              7    -0.43         0.17       0


variables under investigation:  GAToRS4pp GAToRS3pp GAToRS5pp GAToRS2pp GAToRS1pp 

Cronbach's Alpha: 0.69 

Parallel analysis suggests that the number of factors =  2  and the number of components =  1 
GAToRSpp 
Number of components:  1 



EFA factor loadings (1 factor solution): 

Loadings:
          MR1  
GAToRS4pp 0.624
GAToRS3pp 0.713
GAToRS5pp 0.345
GAToRS2pp 0.591
GAToRS1pp 0.617

                 MR1
SS loadings    1.746
Proportion Var 0.349
CFA summary and fit statistics: 
lavaan 0.6.17 ended normally after 29 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        10

  Number of observations                           216

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                                37.667      40.165
  Degrees of freedom                                 5           5
  P-value (Chi-square)                           0.000       0.000
  Scaling correction factor                                  0.938
    Yuan-Bentler correction (Mplus variant)                       

Model Test Baseline Model:

  Test statistic                               209.654     174.056
  Degrees of freedom                                10          10
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.205

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.836       0.786
  Tucker-Lewis Index (TLI)                       0.673       0.571
                                                                  
  Robust Comparative Fit Index (CFI)                         0.833
  Robust Tucker-Lewis Index (TLI)                            0.666

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -1831.716   -1831.716
  Scaling correction factor                                  1.168
      for the MLR correction                                      
  Loglikelihood unrestricted model (H1)      -1812.882   -1812.882
  Scaling correction factor                                  1.091
      for the MLR correction                                      
                                                                  
  Akaike (AIC)                                3683.431    3683.431
  Bayesian (BIC)                              3717.184    3717.184
  Sample-size adjusted Bayesian (SABIC)       3685.496    3685.496

Root Mean Square Error of Approximation:

  RMSEA                                          0.174       0.180
  90 Percent confidence interval - lower         0.125       0.130
  90 Percent confidence interval - upper         0.228       0.236
  P-value H_0: RMSEA <= 0.050                    0.000       0.000
  P-value H_0: RMSEA >= 0.080                    0.999       0.999
                                                                  
  Robust RMSEA                                               0.175
  90 Percent confidence interval - lower                     0.127
  90 Percent confidence interval - upper                     0.227
  P-value H_0: Robust RMSEA <= 0.050                         0.000
  P-value H_0: Robust RMSEA >= 0.080                         0.999

Standardized Root Mean Square Residual:

  SRMR                                           0.075       0.075

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  GAToRSpp =~                                                           
    GAToRS4pp         1.000                               0.883    0.585
    GAToRS3pp         1.003    0.140    7.191    0.000    0.886    0.670
    GAToRS5pp         0.547    0.188    2.919    0.004    0.483    0.297
    GAToRS2pp         0.967    0.196    4.941    0.000    0.853    0.601
    GAToRS1pp         0.960    0.210    4.571    0.000    0.848    0.657

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .GAToRS4pp         1.501    0.186    8.073    0.000    1.501    0.658
   .GAToRS3pp         0.962    0.190    5.062    0.000    0.962    0.551
   .GAToRS5pp         2.413    0.244    9.889    0.000    2.413    0.912
   .GAToRS2pp         1.291    0.203    6.365    0.000    1.291    0.639
   .GAToRS1pp         0.945    0.170    5.545    0.000    0.945    0.568
    GAToRSpp          0.779    0.198    3.930    0.000    1.000    1.000



CFA first 6 Modification Indices: 
         lhs op       rhs     mi    epc sepc.lv sepc.all sepc.nox
20 GAToRS5pp ~~ GAToRS1pp 16.774 -0.518  -0.518   -0.343   -0.343
21 GAToRS2pp ~~ GAToRS1pp 15.266  0.487   0.487    0.441    0.441
17 GAToRS3pp ~~ GAToRS2pp 13.196 -0.472  -0.472   -0.423   -0.423
13 GAToRS4pp ~~ GAToRS5pp 12.423  0.525   0.525    0.276    0.276
15 GAToRS4pp ~~ GAToRS1pp  9.735 -0.405  -0.405   -0.340   -0.340
16 GAToRS3pp ~~ GAToRS5pp  3.438  0.241   0.241    0.158    0.158
### variable mean
questionnaireCAMs[[nameVariable]]  <- questionnaireCAMs %>%
  select(matches(regEx)) %>%
  rowMeans(na.rm = TRUE)
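Parallel analysis suggests two factors but only one component for this scale; a sketch to inspect the two-factor EFA solution (not evaluated here):

### sketch: two-factor EFA follow-up for the personal level positive items
items_pp <- questionnaireCAMs %>%
  select(matches("^GAToRS.*pp$"))
psych::fa(items_pp, nfactors = 2, fm = "minres", rotate = "oblimin")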

Personal Level Negative Attitude

regEx <- "^GAToRS.*pn$"
nameScale <- "GAToRS - pn"
nameVariable <- "mean_GAToRSpn"

### number of items
sum(str_detect(string = colnames(questionnaireCAMs), pattern = regEx))
[1] 5
### get correlation plot, descriptives, EFA, CFA

### EFA
tmp <- CFAstats(dataset = questionnaireCAMs, regularExp = regEx, labelLatent = str_remove(string = nameVariable, pattern = "mean_"), 
                showPlots = TRUE, 
                computeEFA = TRUE, 
                computeCFA = TRUE, 
                computeCFAMplus = FALSE)



descriptive statistics: 
           Mean   SD Median CoeffofVariation Minimum Maximum Lower Quantile
GAToRS-2pn 3.78 1.50      4             0.40       1       7              1
GAToRS-4pn 2.77 1.47      3             0.53       1       7              1
GAToRS-3pn 2.56 1.41      2             0.55       1       7              1
GAToRS-1pn 2.88 1.52      3             0.53       1       7              1
GAToRS-5pn 3.27 1.70      3             0.52       1       7              1
           Upper Quantile Skewness Kurtosis(-3) KS-Test
GAToRS-2pn              7     0.12        -0.59       0
GAToRS-4pn              7     0.82         0.26       0
GAToRS-3pn              7     0.83         0.22       0
GAToRS-1pn              7     0.64        -0.38       0
GAToRS-5pn              7     0.38        -0.75       0


variables under investigation:  GAToRS2pn GAToRS4pn GAToRS3pn GAToRS1pn GAToRS5pn 

Cronbach's Alpha: 0.82 
Error in if (any(lower > upper)) stop("lower>upper integration limits") : 
  missing value where TRUE/FALSE needed
[1] "using Pearson correlations instead of polychoric correlations"

Parallel analysis suggests that the number of factors =  1  and the number of components =  1 
GAToRSpn 
Number of components:  1 



EFA factor loadings (1 factor solution): 

Loadings:
          MR1  
GAToRS2pn 0.560
GAToRS4pn 0.769
GAToRS3pn 0.796
GAToRS1pn 0.720
GAToRS5pn 0.745

                 MR1
SS loadings    2.612
Proportion Var 0.522
CFA summary and fit statistics: 
lavaan 0.6.17 ended normally after 24 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        10

  Number of observations                           216

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                                 4.864       4.224
  Degrees of freedom                                 5           5
  P-value (Chi-square)                           0.433       0.518
  Scaling correction factor                                  1.152
    Yuan-Bentler correction (Mplus variant)                       

Model Test Baseline Model:

  Test statistic                               343.042     252.792
  Degrees of freedom                                10          10
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.357

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    1.000       1.000
  Tucker-Lewis Index (TLI)                       1.001       1.006
                                                                  
  Robust Comparative Fit Index (CFI)                         1.000
  Robust Tucker-Lewis Index (TLI)                            1.005

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -1811.124   -1811.124
  Scaling correction factor                                  1.219
      for the MLR correction                                      
  Loglikelihood unrestricted model (H1)      -1808.692   -1808.692
  Scaling correction factor                                  1.197
      for the MLR correction                                      
                                                                  
  Akaike (AIC)                                3642.248    3642.248
  Bayesian (BIC)                              3676.000    3676.000
  Sample-size adjusted Bayesian (SABIC)       3644.312    3644.312

Root Mean Square Error of Approximation:

  RMSEA                                          0.000       0.000
  90 Percent confidence interval - lower         0.000       0.000
  90 Percent confidence interval - upper         0.093       0.082
  P-value H_0: RMSEA <= 0.050                    0.696       0.784
  P-value H_0: RMSEA >= 0.080                    0.101       0.056
                                                                  
  Robust RMSEA                                               0.000
  90 Percent confidence interval - lower                     0.000
  90 Percent confidence interval - upper                     0.093
  P-value H_0: Robust RMSEA <= 0.050                         0.736
  P-value H_0: Robust RMSEA >= 0.080                         0.095

Standardized Root Mean Square Residual:

  SRMR                                           0.020       0.020

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  GAToRSpn =~                                                           
    GAToRS2pn         1.000                               0.801    0.535
    GAToRS4pn         1.351    0.188    7.197    0.000    1.082    0.740
    GAToRS3pn         1.344    0.191    7.037    0.000    1.077    0.764
    GAToRS1pn         1.285    0.212    6.061    0.000    1.030    0.680
    GAToRS5pn         1.519    0.188    8.067    0.000    1.217    0.717

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .GAToRS2pn         1.603    0.161    9.935    0.000    1.603    0.714
   .GAToRS4pn         0.970    0.162    5.991    0.000    0.970    0.453
   .GAToRS3pn         0.828    0.123    6.735    0.000    0.828    0.417
   .GAToRS1pn         1.235    0.213    5.784    0.000    1.235    0.538
   .GAToRS5pn         1.403    0.206    6.796    0.000    1.403    0.486
    GAToRSpn          0.642    0.171    3.748    0.000    1.000    1.000



CFA first 6 Modification Indices: 
         lhs op       rhs    mi    epc sepc.lv sepc.all sepc.nox
16 GAToRS4pn ~~ GAToRS3pn 4.303  0.225   0.225    0.251    0.251
18 GAToRS4pn ~~ GAToRS5pn 1.856 -0.171  -0.171   -0.146   -0.146
13 GAToRS2pn ~~ GAToRS3pn 1.754 -0.137  -0.137   -0.119   -0.119
21 GAToRS1pn ~~ GAToRS5pn 1.151  0.135   0.135    0.103    0.103
15 GAToRS2pn ~~ GAToRS5pn 0.834  0.115   0.115    0.077    0.077
19 GAToRS3pn ~~ GAToRS1pn 0.579 -0.081  -0.081   -0.080   -0.080
### variable mean
questionnaireCAMs[[nameVariable]]  <- questionnaireCAMs %>%
  select(matches(regEx)) %>%
  rowMeans(na.rm = TRUE)

Societal Level Positive Attitude

regEx <- "^GAToRS.*sp$"
nameScale <- "GAToRS - sp"
nameVariable <- "mean_GAToRSsp"

### number of items
sum(str_detect(string = colnames(questionnaireCAMs), pattern = regEx))
[1] 5
### get correlation plot, descriptives, EFA, CFA

### EFA
tmp <- CFAstats(dataset = questionnaireCAMs, regularExp = regEx, labelLatent = str_remove(string = nameVariable, pattern = "mean_"), 
                showPlots = TRUE, 
                computeEFA = TRUE, 
                computeCFA = TRUE, 
                computeCFAMplus = FALSE)



descriptive statistics: 
           Mean   SD Median CoeffofVariation Minimum Maximum Lower Quantile
GAToRS-3sp 5.74 1.22      6             0.21       1       7              1
GAToRS-2sp 6.14 0.94      6             0.15       2       7              2
GAToRS-1sp 6.04 1.05      6             0.17       2       7              2
GAToRS-4sp 5.43 1.36      6             0.25       1       7              1
GAToRS-5sp 5.49 1.10      6             0.20       1       7              1
           Upper Quantile Skewness Kurtosis(-3) KS-Test
GAToRS-3sp              7    -1.09         1.09       0
GAToRS-2sp              7    -1.32         2.34       0
GAToRS-1sp              7    -1.27         1.76       0
GAToRS-4sp              7    -1.01         0.97       0
GAToRS-5sp              7    -1.06         1.89       0


variables under investigation:  GAToRS3sp GAToRS2sp GAToRS1sp GAToRS4sp GAToRS5sp 

Cronbach's Alpha: 0.8 

Parallel analysis suggests that the number of factors =  1  and the number of components =  1 
GAToRSsp 
Number of components:  1 



EFA factor loadings (1 factor solution): 

Loadings:
          MR1  
GAToRS3sp 0.681
GAToRS2sp 0.826
GAToRS1sp 0.749
GAToRS4sp 0.532
GAToRS5sp 0.712

                 MR1
SS loadings    2.498
Proportion Var 0.500
CFA summary and fit statistics: 
lavaan 0.6.17 ended normally after 22 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        10

  Number of observations                           216

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                                10.602      10.107
  Degrees of freedom                                 5           5
  P-value (Chi-square)                           0.060       0.072
  Scaling correction factor                                  1.049
    Yuan-Bentler correction (Mplus variant)                       

Model Test Baseline Model:

  Test statistic                               316.156     251.157
  Degrees of freedom                                10          10
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.259

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.982       0.979
  Tucker-Lewis Index (TLI)                       0.963       0.958
                                                                  
  Robust Comparative Fit Index (CFI)                         0.982
  Robust Tucker-Lewis Index (TLI)                            0.965

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -1504.582   -1504.582
  Scaling correction factor                                  1.637
      for the MLR correction                                      
  Loglikelihood unrestricted model (H1)      -1499.281   -1499.281
  Scaling correction factor                                  1.441
      for the MLR correction                                      
                                                                  
  Akaike (AIC)                                3029.164    3029.164
  Bayesian (BIC)                              3062.916    3062.916
  Sample-size adjusted Bayesian (SABIC)       3031.228    3031.228

Root Mean Square Error of Approximation:

  RMSEA                                          0.072       0.069
  90 Percent confidence interval - lower         0.000       0.000
  90 Percent confidence interval - upper         0.133       0.129
  P-value H_0: RMSEA <= 0.050                    0.226       0.251
  P-value H_0: RMSEA >= 0.080                    0.474       0.435
                                                                  
  Robust RMSEA                                               0.070
  90 Percent confidence interval - lower                     0.000
  90 Percent confidence interval - upper                     0.133
  P-value H_0: Robust RMSEA <= 0.050                         0.244
  P-value H_0: Robust RMSEA >= 0.080                         0.461

Standardized Root Mean Square Residual:

  SRMR                                           0.033       0.033

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  GAToRSsp =~                                                           
    GAToRS3sp         1.000                               0.770    0.632
    GAToRS2sp         0.972    0.155    6.251    0.000    0.748    0.798
    GAToRS1sp         0.929    0.154    6.039    0.000    0.715    0.683
    GAToRS4sp         0.887    0.164    5.412    0.000    0.682    0.503
    GAToRS5sp         1.018    0.165    6.177    0.000    0.783    0.711

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .GAToRS3sp         0.889    0.154    5.777    0.000    0.889    0.600
   .GAToRS2sp         0.320    0.057    5.635    0.000    0.320    0.364
   .GAToRS1sp         0.584    0.096    6.087    0.000    0.584    0.533
   .GAToRS4sp         1.372    0.181    7.574    0.000    1.372    0.747
   .GAToRS5sp         0.600    0.090    6.632    0.000    0.600    0.494
    GAToRSsp          0.592    0.171    3.463    0.001    1.000    1.000



CFA first 6 Modification Indices: 
         lhs op       rhs    mi    epc sepc.lv sepc.all sepc.nox
19 GAToRS1sp ~~ GAToRS4sp 7.371  0.199   0.199    0.222    0.222
18 GAToRS2sp ~~ GAToRS5sp 6.055  0.149   0.149    0.340    0.340
16 GAToRS2sp ~~ GAToRS1sp 3.948 -0.110  -0.110   -0.255   -0.255
21 GAToRS4sp ~~ GAToRS5sp 3.480 -0.143  -0.143   -0.157   -0.157
15 GAToRS3sp ~~ GAToRS5sp 0.634 -0.054  -0.054   -0.075   -0.075
17 GAToRS2sp ~~ GAToRS4sp 0.579 -0.050  -0.050   -0.075   -0.075
### variable mean
questionnaireCAMs[[nameVariable]]  <- questionnaireCAMs %>%
  select(matches(regEx)) %>%
  rowMeans(na.rm = TRUE)

Societal Level Negative Attitude

regEx <- "^GAToRS.*sn$"
nameScale <- "GAToRS - sn"
nameVariable <- "mean_GAToRSsn"

### number of items
sum(str_detect(string = colnames(questionnaireCAMs), pattern = regEx))
[1] 5
### get correlation plot, descriptives, EFA, CFA

### EFA
tmp <- CFAstats(dataset = questionnaireCAMs, regularExp = regEx, labelLatent = str_remove(string = nameVariable, pattern = "mean_"), 
                showPlots = TRUE, 
                computeEFA = TRUE, 
                computeCFA = TRUE, 
                computeCFAMplus = FALSE)



descriptive statistics: 
           Mean   SD Median CoeffofVariation Minimum Maximum Lower Quantile
GAToRS-5sn 5.36 1.39      6             0.26       1       7              1
GAToRS-1sn 4.94 1.65      5             0.33       1       7              1
GAToRS-3sn 4.52 1.72      5             0.38       1       7              1
GAToRS-4sn 5.44 1.30      5             0.24       1       7              1
GAToRS-2sn 4.77 1.53      5             0.32       1       7              1
           Upper Quantile Skewness Kurtosis(-3) KS-Test
GAToRS-5sn              7    -0.91         0.60       0
GAToRS-1sn              7    -0.62        -0.51       0
GAToRS-3sn              7    -0.44        -0.75       0
GAToRS-4sn              7    -0.72         0.30       0
GAToRS-2sn              7    -0.54        -0.35       0


variables under investigation:  GAToRS5sn GAToRS1sn GAToRS3sn GAToRS4sn GAToRS2sn 

Cronbach's Alpha: 0.62 

Parallel analysis suggests that the number of factors =  2  and the number of components =  1 
GAToRSsn 
Number of components:  1 

KMO criterion is too low (< .6) for: 
 GAToRS4sn 
 mean KMO: 0.64 


EFA factor loadings (1 factor solution): 

Loadings:
          MR1  
GAToRS5sn 0.550
GAToRS1sn 0.610
GAToRS3sn 0.478
GAToRS4sn 0.450
GAToRS2sn 0.509

                 MR1
SS loadings    1.363
Proportion Var 0.273
CFA summary and fit statistics: 
lavaan 0.6.17 ended normally after 38 iterations

  Estimator                                         ML
  Optimization method                           NLMINB
  Number of model parameters                        10

  Number of observations                           216

Model Test User Model:
                                              Standard      Scaled
  Test Statistic                                33.232      52.763
  Degrees of freedom                                 5           5
  P-value (Chi-square)                           0.000       0.000
  Scaling correction factor                                  0.630
    Yuan-Bentler correction (Mplus variant)                       

Model Test Baseline Model:

  Test statistic                               129.470     100.031
  Degrees of freedom                                10          10
  P-value                                        0.000       0.000
  Scaling correction factor                                  1.294

User Model versus Baseline Model:

  Comparative Fit Index (CFI)                    0.764       0.469
  Tucker-Lewis Index (TLI)                       0.527      -0.061
                                                                  
  Robust Comparative Fit Index (CFI)                         0.742
  Robust Tucker-Lewis Index (TLI)                            0.484

Loglikelihood and Information Criteria:

  Loglikelihood user model (H0)              -1926.353   -1926.353
  Scaling correction factor                                  1.444
      for the MLR correction                                      
  Loglikelihood unrestricted model (H1)      -1909.737   -1909.737
  Scaling correction factor                                  1.173
      for the MLR correction                                      
                                                                  
  Akaike (AIC)                                3872.705    3872.705
  Bayesian (BIC)                              3906.458    3906.458
  Sample-size adjusted Bayesian (SABIC)       3874.770    3874.770

Root Mean Square Error of Approximation:

  RMSEA                                          0.162       0.210
  90 Percent confidence interval - lower         0.112       0.149
  90 Percent confidence interval - upper         0.216       0.278
  P-value H_0: RMSEA <= 0.050                    0.000       0.000
  P-value H_0: RMSEA >= 0.080                    0.996       1.000
                                                                  
  Robust RMSEA                                               0.167
  90 Percent confidence interval - lower                     0.128
  90 Percent confidence interval - upper                     0.209
  P-value H_0: Robust RMSEA <= 0.050                         0.000
  P-value H_0: Robust RMSEA >= 0.080                         1.000

Standardized Root Mean Square Residual:

  SRMR                                           0.080       0.080

Parameter Estimates:

  Standard errors                             Sandwich
  Information bread                           Observed
  Observed information based on                Hessian

Latent Variables:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
  GAToRSsn =~                                                           
    GAToRS5sn         1.000                               0.657    0.475
    GAToRS1sn         1.484    0.964    1.539    0.124    0.975    0.592
    GAToRS3sn         1.228    0.591    2.079    0.038    0.807    0.470
    GAToRS4sn         0.769    0.167    4.610    0.000    0.506    0.390
    GAToRS2sn         1.234    0.788    1.566    0.117    0.811    0.532

Variances:
                   Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
   .GAToRS5sn         1.482    0.334    4.441    0.000    1.482    0.774
   .GAToRS1sn         1.767    0.475    3.722    0.000    1.767    0.650
   .GAToRS3sn         2.292    0.280    8.180    0.000    2.292    0.779
   .GAToRS4sn         1.425    0.275    5.184    0.000    1.425    0.848
   .GAToRS2sn         1.668    0.331    5.044    0.000    1.668    0.717
    GAToRSsn          0.432    0.357    1.212    0.225    1.000    1.000



CFA first 6 Modification Indices: 
         lhs op       rhs     mi    epc sepc.lv sepc.all sepc.nox
14 GAToRS5sn ~~ GAToRS4sn 30.084  0.664   0.664    0.457    0.457
18 GAToRS1sn ~~ GAToRS2sn 12.184  0.750   0.750    0.437    0.437
12 GAToRS5sn ~~ GAToRS1sn  8.487 -0.523  -0.523   -0.323   -0.323
21 GAToRS4sn ~~ GAToRS2sn  8.255 -0.394  -0.394   -0.255   -0.255
17 GAToRS1sn ~~ GAToRS4sn  3.069 -0.271  -0.271   -0.171   -0.171
15 GAToRS5sn ~~ GAToRS2sn  0.925 -0.149  -0.149   -0.095   -0.095
### variable mean
questionnaireCAMs[[nameVariable]]  <- questionnaireCAMs %>%
  select(matches(regEx)) %>%
  rowMeans(na.rm = TRUE)
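The regEx/rowMeans pattern is repeated verbatim for every scale above; a small wrapper could remove the duplication (a sketch only; add_mean_score is a hypothetical helper, not part of the sourced functions):

### sketch: hypothetical helper wrapping the repeated mean-score computation
add_mean_score <- function(data, regEx, nameVariable){
  data[[nameVariable]] <- data %>%
    select(matches(regEx)) %>%
    rowMeans(na.rm = TRUE)
  return(data)
}
# e.g. questionnaireCAMs <- add_mean_score(questionnaireCAMs, "^GAToRS.*sn$", "mean_GAToRSsn")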

Draw CAMs and compute network indicators

### draw CAMs
CAMdrawn <- draw_CAM(dat_merged = CAMfiles_combined[[3]],
                     dat_nodes = CAMfiles_combined[[1]],ids_CAMs = "all",
                     plot_CAM = FALSE,
                     useCoordinates = TRUE,
                     relvertexsize = 3,
                     reledgesize = 1)
processing 432 CAMs... 
Warning: `graph.data.frame()` was deprecated in igraph 2.0.0.
ℹ Please use `graph_from_data_frame()` instead.
[1] "== ids_CAMs in drawnCAM"
### draw CAMs pre
nodes_pre <- CAMfiles_combined[[1]][CAMfiles_combined[[1]]$CAM %in% questionnaireCAMs$CAM_ID_pre,]
merged_pre <- CAMfiles_combined[[3]][CAMfiles_combined[[3]]$CAM.x %in% questionnaireCAMs$CAM_ID_pre,]

# CAMdrawn_pre <- draw_CAM(dat_merged = merged_pre,
#                      dat_nodes = nodes_pre,ids_CAMs = "all",
#                      plot_CAM = FALSE,
#                      useCoordinates = TRUE,
#                      relvertexsize = 3,
#                      reledgesize = 1)

### draw CAMs post
nodes_post <- CAMfiles_combined[[1]][CAMfiles_combined[[1]]$CAM %in% questionnaireCAMs$CAM_ID_post,]
merged_post <- CAMfiles_combined[[3]][CAMfiles_combined[[3]]$CAM.x %in% questionnaireCAMs$CAM_ID_post,]

# CAMdrawn_post <- draw_CAM(dat_merged = merged_post,
#                      dat_nodes = nodes_post,ids_CAMs = "all",
#                      plot_CAM = FALSE,
#                      useCoordinates = TRUE,
#                      relvertexsize = 3,
#                      reledgesize = 1)



### network indicators
tmp_microIndicator <- c("Rettungsroboter", "sozialer Assistenzroboter", "Vorteile", "Nachteile")
networkIndicators <- compute_indicatorsCAM(drawn_CAM = CAMdrawn, 
                                           micro_degree = tmp_microIndicator, 
                                           micro_valence = tmp_microIndicator, 
                                           micro_centr_clo = tmp_microIndicator, 
                                           micro_transitivity = tmp_microIndicator, 
                                           largestClique = FALSE)
Warning: `graph.density()` was deprecated in igraph 2.0.0.
ℹ Please use `edge_density()` instead.
Warning: The `types1` argument of `assortativity()` is deprecated as of igraph 1.6.0.
ℹ Please use the `values` argument instead.
Warning: `assortativity.degree()` was deprecated in igraph 2.0.0.
ℹ Please use `assortativity_degree()` instead.
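As a quick plausibility check, the micro-level indicators computed for the four anchor concepts can be summarised; a sketch, assuming the micro-level columns follow the pattern "_micro_" (as in valence_micro_Rettungsroboter used further below):

# summary(networkIndicators[, grepl(pattern = "_micro_", x = colnames(networkIndicators))])
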
### wordlists
CAMwordlist <- create_wordlist(
  dat_nodes =  CAMfiles_combined[[1]],
  dat_merged =  CAMfiles_combined[[3]],
  useSummarized = TRUE,
  order = "frequency",
  splitByValence = FALSE,
  comments = TRUE,
  raterSubsetWords = NULL,
  rater = FALSE
)
[1] "create_wordlist - use summarized words"
[1] 6404
[1] 6404
processing 432 CAMs... 
[1] "== ids_CAMs in drawnCAM"
DT::datatable(CAMwordlist, options = list(pageLength = 5)) 

Descriptive Analyses

describe sample

psych::describe(questionnaireCAMs[, c("socio_age")])
   vars   n  mean   sd median trimmed  mad min max range skew kurtosis   se
X1    1 215 30.19 8.55     29   29.03 7.41  18  67    49 1.66      3.6 0.58
table(questionnaireCAMs$socio_sex)

Female   Male 
    75    141 
table(questionnaireCAMs$socio_student)

 No Yes 
113  86 
table(questionnaireCAMs$socio_employment)

            Due to start a new job within the next month 
                                                       4 
                                               Full-Time 
                                                      84 
Not in paid work (e.g. homemaker', 'retired or disabled) 
                                                       7 
                                                   Other 
                                                      20 
                                               Part-Time 
                                                      56 
                            Unemployed (and job seeking) 
                                                      19 
## split by robot
psych::describe(socio_age ~ choosen_Robot, data = questionnaireCAMs)

 Descriptive statistics by group 
choosen_Robot: Rettungsroboter
          vars   n  mean   sd median trimmed  mad min max range skew kurtosis
socio_age    1 117 30.09 8.29     29   29.19 7.41  18  63    45 1.48      3.2
            se
socio_age 0.77
------------------------------------------------------------ 
choosen_Robot: sozialer Assistenzroboter
          vars  n  mean  sd median trimmed  mad min max range skew kurtosis  se
socio_age    1 98 30.31 8.9   28.5   28.91 6.67  18  67    49  1.8     3.74 0.9
table(questionnaireCAMs$socio_sex, questionnaireCAMs$choosen_Robot)
        
         Rettungsroboter sozialer Assistenzroboter
  Female              46                        29
  Male                71                        70

feedback on the study

Question: Haben Sie Feedback oder Kritik an der Online-Studie? (English: Do you have any feedback or criticism regarding the online study?)

DT::datatable(questionnaireCAMs[,c("PROLIFIC_PID", "feedback_critic")], options = list(pageLength = 5)) 

technical problems with CAMEL

DT::datatable(questionnaireCAMs[,c("PROLIFIC_PID", str_subset(string = colnames(questionnaireCAMs), pattern = "^feedCAM"))], options = list(pageLength = 5)) 
hist(questionnaire$feedCAM_repres)

summary(questionnaire$feedCAM_repres)
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
  2.000   6.000   6.000   6.023   7.000   7.000 

differences in survey means

## split by robot
psych::describe(mean_AlmereAnxiety + mean_AlmereAttitude + mean_LiWangAnthropomorphism + mean_LiWangAutonomy + mean_GAToRSpp + mean_GAToRSpn + mean_GAToRSsp + mean_GAToRSsn ~ choosen_Robot, data = questionnaireCAMs)

 Descriptive statistics by group 
choosen_Robot: Rettungsroboter
                            vars   n mean   sd median trimmed  mad  min max
mean_AlmereAnxiety             1 117 2.57 0.87   2.75    2.58 0.74 1.00 5.0
mean_AlmereAttitude            2 117 4.16 0.62   4.33    4.21 0.49 1.67 5.0
mean_LiWangAnthropomorphism    3 117 2.59 1.14   2.40    2.54 1.48 1.00 5.0
mean_LiWangAutonomy            4 117 4.96 1.09   5.00    5.02 0.99 1.00 7.0
mean_GAToRSpp                  5 117 4.29 1.02   4.40    4.33 0.89 1.00 7.0
mean_GAToRSpn                  6 117 2.96 1.07   3.00    2.92 1.19 1.00 6.4
mean_GAToRSsp                  7 117 5.78 0.90   6.00    5.89 0.59 2.20 7.0
mean_GAToRSsn                  8 117 4.89 1.03   5.00    4.93 0.89 2.20 7.0
                            range  skew kurtosis   se
mean_AlmereAnxiety           4.00 -0.05    -0.33 0.08
mean_AlmereAttitude          3.33 -1.17     2.11 0.06
mean_LiWangAnthropomorphism  4.00  0.30    -0.96 0.11
mean_LiWangAutonomy          6.00 -0.80     1.42 0.10
mean_GAToRSpp                6.00 -0.46     0.46 0.09
mean_GAToRSpn                5.40  0.39     0.19 0.10
mean_GAToRSsp                4.80 -1.39     2.46 0.08
mean_GAToRSsn                4.80 -0.37    -0.25 0.10
------------------------------------------------------------ 
choosen_Robot: sozialer Assistenzroboter
                            vars  n mean   sd median trimmed  mad min  max
mean_AlmereAnxiety             1 99 2.30 0.83   2.25    2.29 1.11 1.0 4.00
mean_AlmereAttitude            2 99 3.92 0.65   4.00    3.96 0.49 1.0 5.00
mean_LiWangAnthropomorphism    3 99 2.82 1.15   2.80    2.77 1.19 1.0 5.60
mean_LiWangAutonomy            4 99 5.19 1.06   5.67    5.30 0.99 1.0 6.67
mean_GAToRSpp                  5 99 4.18 0.88   4.20    4.21 0.89 1.8 5.80
mean_GAToRSpn                  6 99 3.16 1.25   3.00    3.09 1.19 1.0 6.60
mean_GAToRSsp                  7 99 5.75 0.76   5.80    5.82 0.59 3.4 7.00
mean_GAToRSsn                  8 99 5.13 0.84   5.20    5.15 0.89 3.4 6.80
                            range  skew kurtosis   se
mean_AlmereAnxiety           3.00  0.16    -1.14 0.08
mean_AlmereAttitude          4.00 -1.11     3.12 0.06
mean_LiWangAnthropomorphism  4.60  0.28    -0.64 0.12
mean_LiWangAutonomy          5.67 -1.19     1.88 0.11
mean_GAToRSpp                4.00 -0.36    -0.18 0.09
mean_GAToRSpn                5.60  0.60    -0.04 0.13
mean_GAToRSsp                3.60 -0.88     1.06 0.08
mean_GAToRSsn                3.40 -0.21    -0.76 0.08

Analyses

Quantitative bird’s eye view

survey scales

Check for mean differences between robots

## mean_AlmereAnxiety
tmp <- aov(formula = mean_AlmereAnxiety ~ choosen_Robot, data = questionnaireCAMs)
if(summary(tmp)[[1]][["Pr(>F)"]][1] < .05){
  ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot,
  y = mean_AlmereAnxiety
)
}
Warning in min(x): no non-missing arguments to min; returning Inf
Warning in max(x): no non-missing arguments to max; returning -Inf

## mean_AlmereAttitude
tmp <- aov(formula = mean_AlmereAttitude ~ choosen_Robot, data = questionnaireCAMs)
if(summary(tmp)[[1]][["Pr(>F)"]][1] < .05){
  ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot,
  y = mean_AlmereAttitude
)
}
Warning in min(x): no non-missing arguments to min; returning Inf
Warning in max(x): no non-missing arguments to max; returning -Inf

## mean_LiWangAnthropomorphism
tmp <- aov(formula = mean_LiWangAnthropomorphism ~ choosen_Robot, data = questionnaireCAMs)
if(summary(tmp)[[1]][["Pr(>F)"]][1] < .05){
  ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot,
  y = mean_LiWangAnthropomorphism
)
}

## mean_LiWangAutonomy
tmp <- aov(formula = mean_LiWangAutonomy ~ choosen_Robot, data = questionnaireCAMs)
if(summary(tmp)[[1]][["Pr(>F)"]][1] < .05){
  ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot,
  y = mean_LiWangAutonomy
)
}

## mean_GAToRSpp
tmp <- aov(formula = mean_GAToRSpp ~ choosen_Robot, data = questionnaireCAMs)
if(summary(tmp)[[1]][["Pr(>F)"]][1] < .05){
  ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot,
  y = mean_GAToRSpp
)
}

## mean_GAToRSpn
tmp <- aov(formula = mean_GAToRSpn ~ choosen_Robot, data = questionnaireCAMs)
if(summary(tmp)[[1]][["Pr(>F)"]][1] < .05){
  ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot,
  y = mean_GAToRSpn
)
}

## mean_GAToRSsp
tmp <- aov(formula = mean_GAToRSsp ~ choosen_Robot, data = questionnaireCAMs)
if(summary(tmp)[[1]][["Pr(>F)"]][1] < .05){
  ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot,
  y = mean_GAToRSsp
)
}

## mean_GAToRSsn
tmp <- aov(formula = mean_GAToRSsn ~ choosen_Robot, data = questionnaireCAMs)
if(summary(tmp)[[1]][["Pr(>F)"]][1] < .05){
  ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot,
  y = mean_GAToRSsn
)
}
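
The per-scale blocks above are intentionally explicit; the same screening can be expressed more compactly as a loop over the scale names. A sketch that only prints the ANOVA p-value per scale (significant scales are then plotted as above):

# scales <- c("mean_AlmereAnxiety", "mean_AlmereAttitude", "mean_LiWangAnthropomorphism",
#             "mean_LiWangAutonomy", "mean_GAToRSpp", "mean_GAToRSpn",
#             "mean_GAToRSsp", "mean_GAToRSsn")
# for(s in scales){
#   tmp <- aov(formula = as.formula(paste0(s, " ~ choosen_Robot")), data = questionnaireCAMs)
#   cat(s, ": p =", round(summary(tmp)[[1]][["Pr(>F)"]][1], digits = 3), "\n")
# }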

Check for mean differences of valence between robots

ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot,
  y = mean_valence_macro_pre
)
Warning in min(x): no non-missing arguments to min; returning Inf
Warning in max(x): no non-missing arguments to max; returning -Inf

ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot,
  y = mean_valence_macro_post
)
Warning in min(x): no non-missing arguments to min; returning Inf
Warning in max(x): no non-missing arguments to max; returning -Inf

plot(questionnaireCAMs$mean_AlmereAttitude, questionnaireCAMs$mean_valence_macro_pre)

cor(questionnaireCAMs$mean_AlmereAttitude, questionnaireCAMs$mean_valence_macro_pre)
[1] 0.4429144
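
To check whether this association is statistically reliable, a correlation test can be added (a sketch using the same two variables):

# cor.test(questionnaireCAMs$mean_AlmereAttitude, questionnaireCAMs$mean_valence_macro_pre)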

pre, post CAMs

# prepare data
### add pre post
networkIndicators_pre$timepoint <- "pre"
networkIndicators_post$timepoint <- "post"

### long data format
networkIndicators_long <- rbind(networkIndicators_pre, networkIndicators_post)


### add ID
networkIndicators_long$ID <- c(1:(nrow(networkIndicators_long) / 2), 1:(nrow(networkIndicators_long) / 2))

### reformat variable
networkIndicators_long$timepoint <- factor(networkIndicators_long$timepoint, 
                                           levels = c("pre", "post"), 
                                           ordered = FALSE)

### add type robot
networkIndicators_long$typeRobot <- ifelse(test = !is.na(networkIndicators_long$valence_micro_Rettungsroboter), yes = "rescue robots", no = "socially assistive robots")
table(networkIndicators_long$typeRobot)

            rescue robots socially assistive robots 
                      234                       198 
table(questionnaireCAMs$choosen_Robot) * 2

          Rettungsroboter sozialer Assistenzroboter 
                      234                       198 
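The ID variable added above assumes that the pre and post rows are ordered identically; a sketch of an explicit sanity check, assuming both data sets carry the participantCAM identifier used further below:

# stopifnot(all(networkIndicators_pre$participantCAM == networkIndicators_post$participantCAM))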
### post - pre difference of robot -> average valence
# ! all types of changes
fit1 <- afex::aov_car(mean_valence_macro ~ timepoint*typeRobot + Error(ID / timepoint),
                      data = networkIndicators_long)
Converting to factor: typeRobot
Contrasts set to contr.sum for the following variables: typeRobot
fit1a <- afex::aov_ez(id = "ID", dv = "mean_valence_macro",
                      data = networkIndicators_long, between=c("typeRobot"), within=c("timepoint"))
Converting to factor: typeRobot
Contrasts set to contr.sum for the following variables: typeRobot
# partial eta squared
anova(fit1, es = "pes")
Anova Table (Type 3 tests)

Response: mean_valence_macro
                    num Df den Df     MSE       F      pes    Pr(>F)    
typeRobot                1    214 0.37225 17.8759 0.077092 3.493e-05 ***
timepoint                1    214 0.01935 16.5368 0.071732 6.706e-05 ***
typeRobot:timepoint      1    214 0.01935  0.9891 0.004601    0.3211    
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# generalized eta squared
fit1a # > identical results
Anova Table (Type 3 tests)

Response: mean_valence_macro
               Effect     df  MSE         F   ges p.value
1           typeRobot 1, 214 0.37 17.88 ***  .074   <.001
2           timepoint 1, 214 0.02 16.54 ***  .004   <.001
3 typeRobot:timepoint 1, 214 0.02      0.99 <.001    .321
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
dfvalcor <- data_summary(networkIndicators_long, varname="mean_valence_macro",
                         groupnames=c("timepoint","typeRobot"))
Loading required package: plyr
Warning: package 'plyr' was built under R version 4.3.2
------------------------------------------------------------------------------
You have loaded plyr after dplyr - this is likely to cause problems.
If you need functions from both plyr and dplyr, please load plyr first, then dplyr:
library(plyr); library(dplyr)
------------------------------------------------------------------------------

Attaching package: 'plyr'
The following objects are masked from 'package:dplyr':

    arrange, count, desc, failwith, id, mutate, rename, summarise,
    summarize
The following object is masked from 'package:purrr':

    compact
dfvalcor$timepoint <- factor(dfvalcor$timepoint, levels = c("pre", "post"))

p <- ggplot(dfvalcor, aes(x=timepoint, y=mean_valence_macro, fill=typeRobot)) +
  geom_bar(stat="identity", color="black",
           position=position_dodge()) +
  geom_errorbar(aes(ymin=mean_valence_macro-se, ymax=mean_valence_macro+se), width=.2,
                position=position_dodge(.9)) + ggplot_theme + ylab(label = "average emotional evaluation") + 
  theme(axis.title.x = element_text(size=20), axis.title.y = element_text(size=20), axis.text.x = element_text(size = 18), axis.text.y = element_text(size = 18), legend.text = element_text(size=16), legend.title = element_text(size=18))
print(p)
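
The significant main effects could be followed up with estimated marginal means; a sketch, assuming the emmeans package is installed (afex objects are supported directly):

# p_load('emmeans')
# emmeans::emmeans(fit1a, specs = ~ timepoint)  # marginal means of average valence per timepoint
# emmeans::emmeans(fit1a, specs = ~ typeRobot)  # marginal means of average valence per robot type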

### post - pre difference of robot -> average valence
# ! only change types B and D
tmp_ids <- questionnaireCAMs$PROLIFIC_PID[questionnaireCAMs$typeChange %in% c("B", "D")]


networkIndicators_long_BD <- networkIndicators_long[networkIndicators_long$participantCAM %in% tmp_ids,]
dim(networkIndicators_long); dim(networkIndicators_long_BD)
[1] 432  47
[1] 358  47
fit1 <- afex::aov_car(mean_valence_macro ~ timepoint*typeRobot + Error(ID / timepoint),
                      data = networkIndicators_long_BD)
Converting to factor: typeRobot
Contrasts set to contr.sum for the following variables: typeRobot
fit1a <- afex::aov_ez(id = "ID", dv = "mean_valence_macro",
                      data = networkIndicators_long_BD, between=c("typeRobot"), within=c("timepoint"))
Converting to factor: typeRobot
Contrasts set to contr.sum for the following variables: typeRobot
# partial eta squared
anova(fit1, es = "pes")
Anova Table (Type 3 tests)

Response: mean_valence_macro
                    num Df den Df     MSE       F      pes    Pr(>F)    
typeRobot                1    177 0.38914 15.0344 0.078290 0.0001486 ***
timepoint                1    177 0.02291 15.2898 0.079514 0.0001312 ***
typeRobot:timepoint      1    177 0.02291  1.1923 0.006691 0.2763455    
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# generalized eta squared
fit1a # > identical results
Anova Table (Type 3 tests)

Response: mean_valence_macro
               Effect     df  MSE         F   ges p.value
1           typeRobot 1, 177 0.39 15.03 ***  .074   <.001
2           timepoint 1, 177 0.02 15.29 ***  .005   <.001
3 typeRobot:timepoint 1, 177 0.02      1.19 <.001    .276
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
dfvalcor <- data_summary(networkIndicators_long_BD, varname="mean_valence_macro",
                         groupnames=c("timepoint","typeRobot"))

dfvalcor$timepoint <- factor(dfvalcor$timepoint, levels = c("pre", "post"))

p <- ggplot(dfvalcor, aes(x=timepoint, y=mean_valence_macro, fill=typeRobot)) +
  geom_bar(stat="identity", color="black",
           position=position_dodge()) +
  geom_errorbar(aes(ymin=mean_valence_macro-se, ymax=mean_valence_macro+se), width=.2,
                position=position_dodge(.9)) + ggplot_theme + ylab(label = "average emotional evaluation") + 
  theme(axis.title.x = element_text(size=20), axis.title.y = element_text(size=20), axis.text.x = element_text(size = 18), axis.text.y = element_text(size = 18), legend.text = element_text(size=16), legend.title = element_text(size=18))
print(p)

### post - pre difference of robot -> number of concepts
# ! all types of changes
fit1 <- afex::aov_car(num_nodes_macro ~ timepoint*typeRobot + Error(ID / timepoint),
                      data = networkIndicators_long)
Converting to factor: typeRobot
Contrasts set to contr.sum for the following variables: typeRobot
fit1a <- afex::aov_ez(id = "ID", dv = "num_nodes_macro",
                      data = networkIndicators_long, between=c("typeRobot"), within=c("timepoint"))
Converting to factor: typeRobot
Contrasts set to contr.sum for the following variables: typeRobot
# partial eta squared
anova(fit1, es = "pes")
Anova Table (Type 3 tests)

Response: num_nodes_macro
                    num Df den Df     MSE        F     pes Pr(>F)    
typeRobot                1    214 20.5675   1.5163 0.00704 0.2195    
timepoint                1    214  2.5012 199.2052 0.48210 <2e-16 ***
typeRobot:timepoint      1    214  2.5012   0.0249 0.00012 0.8748    
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# generalized eta squared
fit1a # > identical results
Anova Table (Type 3 tests)

Response: num_nodes_macro
               Effect     df   MSE          F   ges p.value
1           typeRobot 1, 214 20.57       1.52  .006    .220
2           timepoint 1, 214  2.50 199.21 ***  .092   <.001
3 typeRobot:timepoint 1, 214  2.50       0.02 <.001    .875
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
dfvalcor <- data_summary(networkIndicators_long, varname="num_nodes_macro",
                         groupnames=c("timepoint","typeRobot"))

dfvalcor$timepoint <- factor(dfvalcor$timepoint, levels = c("pre", "post"))

p <- ggplot(dfvalcor, aes(x=timepoint, y=num_nodes_macro, fill=typeRobot)) +
  geom_bar(stat="identity", color="black",
           position=position_dodge()) +
  geom_errorbar(aes(ymin=num_nodes_macro-se, ymax=num_nodes_macro+se), width=.2,
                position=position_dodge(.9)) + ggplot_theme + ylab(label = "average number of concepts") + 
  theme(axis.title.x = element_text(size=20), axis.title.y = element_text(size=20), axis.text.x = element_text(size = 18), axis.text.y = element_text(size = 18), legend.text = element_text(size=16), legend.title = element_text(size=18))
print(p)

### post - pre difference of robot -> number of concepts
# ! only change types B and D
fit1 <- afex::aov_car(num_nodes_macro ~ timepoint*typeRobot + Error(ID / timepoint),
                      data = networkIndicators_long_BD)
Converting to factor: typeRobot
Contrasts set to contr.sum for the following variables: typeRobot
fit1a <- afex::aov_ez(id = "ID", dv = "num_nodes_macro",
                      data = networkIndicators_long_BD, between=c("typeRobot"), within=c("timepoint"))
Converting to factor: typeRobot
Contrasts set to contr.sum for the following variables: typeRobot
# partial eta squared
anova(fit1, es = "pes")
Anova Table (Type 3 tests)

Response: num_nodes_macro
                    num Df den Df     MSE        F     pes Pr(>F)    
typeRobot                1    177 21.7252   1.9233 0.01075 0.1672    
timepoint                1    177  2.4198 249.6170 0.58511 <2e-16 ***
typeRobot:timepoint      1    177  2.4198   0.0021 0.00001 0.9632    
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# generalized eta squared
fit1a # > identical results
Anova Table (Type 3 tests)

Response: num_nodes_macro
               Effect     df   MSE          F   ges p.value
1           typeRobot 1, 177 21.73       1.92  .010    .167
2           timepoint 1, 177  2.42 249.62 ***  .124   <.001
3 typeRobot:timepoint 1, 177  2.42       0.00 <.001    .963
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
dfvalcor <- data_summary(networkIndicators_long_BD, varname="num_nodes_macro",
                         groupnames=c("timepoint","typeRobot"))

dfvalcor$timepoint <- factor(dfvalcor$timepoint, levels = c("pre", "post"))

p <- ggplot(dfvalcor, aes(x=timepoint, y=num_nodes_macro, fill=typeRobot)) +
  geom_bar(stat="identity", color="black",
           position=position_dodge()) +
  geom_errorbar(aes(ymin=num_nodes_macro-se, ymax=num_nodes_macro+se), width=.2,
                position=position_dodge(.9)) + ggplot_theme + ylab(label = "average number of concepts") + 
  theme(axis.title.x = element_text(size=20), axis.title.y = element_text(size=20), axis.text.x = element_text(size = 18), axis.text.y = element_text(size = 18), legend.text = element_text(size=16), legend.title = element_text(size=18))
print(p)

Qualitative

Bird’s eye view (agg. CAMs)

for both robots

sel_ids <- questionnaireCAMs$PROLIFIC_PID

tmp_nodes <- CAMfiles_combined[[1]]

tmp_nodes$text_summarized <- str_remove(string = tmp_nodes$text_summarized, pattern = "_positive$|_negative$|_neutral$|_ambivalent$")
tmp_nodes$text_summarized <- str_trim(string = tmp_nodes$text_summarized)

CAMaggregated <- aggregate_CAMs(dat_merged = CAMfiles_combined[[3]], dat_nodes = tmp_nodes,
                                ids_CAMs = sel_ids)
[1] "aggregate_CAMs: using participant CAM ids"
processing 216 CAMs... 
[1] "== participantCAM in drawnCAM"
[1] "text_summarized column identified"
g = CAMaggregated[[2]]
g2 = simplify(CAMaggregated[[2]])
# plot(g2, edge.arrow.size=0.01,
#      vertex.size=diag(CAMaggregated[[1]]) / max(diag(CAMaggregated[[1]]))*20)

E(g2)$weight = sapply(E(g2), function(e) {
  length(all_shortest_paths(g, from=ends(g2, e)[1], to=ends(g2, e)[2])$res) } )
E(g2)$weight = E(g2)$weight / 2
# E(g2)$weight[E(g2)$weight == 1] <- NA

V(g2)$color[V(g2)$value <= .5 & V(g2)$value >= -.5] <- "yellow"

V(g2)$shape <- NA
V(g2)$shape <- ifelse(test = V(g2)$color == "yellow", yes = "square", no = "circle")



### > plot multiple times because of random layout
for(i in 1:5){
  plot(g2, edge.arrow.size = 0,
       layout=layout_nicely, vertex.frame.color="black", asp = .5, margin = -0.1,
       vertex.size=diag(CAMaggregated[[1]]) / max(diag(CAMaggregated[[1]]))*10,
       vertex.label.cex = .9,
       edge.weight=2, edge.width=(E(g2)$weight/10))
}
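
Because layout_nicely is not deterministic, the five plots above differ only in node placement. A sketch for a reproducible figure, computing the layout once with a fixed (hypothetical) seed and reusing it:

# set.seed(123)
# layout_fixed <- layout_nicely(g2)  # compute the layout once and reuse it
# plot(g2, edge.arrow.size = 0,
#      layout = layout_fixed, vertex.frame.color = "black", asp = .5, margin = -0.1,
#      vertex.size = diag(CAMaggregated[[1]]) / max(diag(CAMaggregated[[1]])) * 10,
#      vertex.label.cex = .9,
#      edge.width = (E(g2)$weight / 10))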

for rescue robot pre

sel_ids <- questionnaireCAMs$PROLIFIC_PID[questionnaireCAMs$choosen_Robot == "Rettungsroboter"]

tmp_nodes <- nodes_pre

tmp_nodes$text_summarized <- str_remove(string = tmp_nodes$text_summarized, pattern = "_positive$|_negative$|_neutral$|_ambivalent$")
tmp_nodes$text_summarized <- str_trim(string = tmp_nodes$text_summarized)

CAMaggregated_pre <- aggregate_CAMs(dat_merged = merged_pre, dat_nodes = tmp_nodes,
                                ids_CAMs = sel_ids)
[1] "aggregate_CAMs: using participant CAM ids"
processing 117 CAMs... 
[1] "provided participantCAM ID in drawnCAM"
[1] "text_summarized column identified"
g = CAMaggregated_pre[[2]]
g2 = simplify(CAMaggregated_pre[[2]])
# plot(g2, edge.arrow.size=0.01,
#      vertex.size=diag(CAMaggregated_pre[[1]]) / max(diag(CAMaggregated_pre[[1]]))*20)

E(g2)$weight = sapply(E(g2), function(e) {
  length(all_shortest_paths(g, from=ends(g2, e)[1], to=ends(g2, e)[2])$res) } )
E(g2)$weight = E(g2)$weight / 2
# E(g2)$weight[E(g2)$weight == 1] <- NA

V(g2)$color[V(g2)$value <= .5 & V(g2)$value >= -.5] <- "yellow"

V(g2)$shape <- NA
V(g2)$shape <- ifelse(test = V(g2)$color == "yellow", yes = "square", no = "circle")



### > plot multiple times because of random layout
for(i in 1:5){
  plot(g2, edge.arrow.size = 0,
       layout=layout_nicely, vertex.frame.color="black", asp = .5, margin = -0.1,
       vertex.size=diag(CAMaggregated_pre[[1]]) / max(diag(CAMaggregated_pre[[1]]))*10,
       vertex.label.cex = .9,
       edge.weight=2, edge.width=(E(g2)$weight/8))
}

for rescue robot post

sel_ids <- questionnaireCAMs$PROLIFIC_PID[questionnaireCAMs$choosen_Robot == "Rettungsroboter"]

tmp_nodes <- nodes_post

tmp_nodes$text_summarized <- str_remove(string = tmp_nodes$text_summarized, pattern = "_positive$|_negative$|_neutral$|_ambivalent$")
tmp_nodes$text_summarized <- str_trim(string = tmp_nodes$text_summarized)

CAMaggregated_post <- aggregate_CAMs(dat_merged = merged_post, dat_nodes = tmp_nodes,
                                ids_CAMs = sel_ids)
[1] "aggregate_CAMs: using participant CAM ids"
processing 117 CAMs... 
[1] "provided participantCAM ID in drawnCAM"
[1] "text_summarized column identified"
g = CAMaggregated_post[[2]]
g2 = simplify(CAMaggregated_post[[2]])
# plot(g2, edge.arrow.size=0.01,
#      vertex.size=diag(CAMaggregated_post[[1]]) / max(diag(CAMaggregated_post[[1]]))*20)

E(g2)$weight = sapply(E(g2), function(e) {
  length(all_shortest_paths(g, from=ends(g2, e)[1], to=ends(g2, e)[2])$res) } )
E(g2)$weight = E(g2)$weight / 2
# E(g2)$weight[E(g2)$weight == 1] <- NA

V(g2)$color[V(g2)$value <= .5 & V(g2)$value >= -.5] <- "yellow"

V(g2)$shape <- NA
V(g2)$shape <- ifelse(test = V(g2)$color == "yellow", yes = "square", no = "circle")



### > plot multiple times because of random layout
for(i in 1:5){
  plot(g2, edge.arrow.size = 0,
       layout=layout_nicely, vertex.frame.color="black", asp = .5, margin = -0.1,
       vertex.size=diag(CAMaggregated_post[[1]]) / max(diag(CAMaggregated_post[[1]]))*10,
       vertex.label.cex = .9,
       edge.weight=2, edge.width=(E(g2)$weight/8))
}

Differences of adjacency matrices:

data.frame(post = diag(CAMaggregated_post[[1]]), 
           pre = diag(CAMaggregated_pre[[1]]), 
           differences = diag(CAMaggregated_post[[1]]) - diag(CAMaggregated_pre[[1]]))
                post pre differences
Rettungsroboter  117 117           0
Nachteile        117 117           0
Vorteile         117 117           0
MT               120 119           1
AN                35  34           1
R                 78  56          22
T                 46  43           3
SIP               42  38           4
TP               125  40          85
SA               153 103          50
RCN               44 145        -101
RCPP              55  36          19
TL                83  67          16
RCPN              42  32          10
AP                24  21           3
HRIN              22  20           2
RCA               43  44          -1
HC                56  54           2
LC                14  13           1
HRIP               9  22         -13
SIN               23   4          19
# CAMaggregated_post[[1]] - CAMaggregated_pre[[1]]
# diag(CAMaggregated_post[[1]]) - diag(CAMaggregated_pre[[1]])

for social assistance robot pre

sel_ids <- questionnaireCAMs$PROLIFIC_PID[questionnaireCAMs$choosen_Robot == "sozialer Assistenzroboter"]

tmp_nodes <- nodes_pre

tmp_nodes$text_summarized <- str_remove(string = tmp_nodes$text_summarized, pattern = "_positive$|_negative$|_neutral$|_ambivalent$")
tmp_nodes$text_summarized <- str_trim(string = tmp_nodes$text_summarized)

CAMaggregated_pre <- aggregate_CAMs(dat_merged = merged_pre, dat_nodes = tmp_nodes,
                                ids_CAMs = sel_ids)
[1] "aggregate_CAMs: using participant CAM ids"
processing 99 CAMs... 
[1] "provided participantCAM ID in drawnCAM"
[1] "text_summarized column identified"
g = CAMaggregated_pre[[2]]
g2 = simplify(CAMaggregated_pre[[2]])
# plot(g2, edge.arrow.size=0.01,
#      vertex.size=diag(CAMaggregated_pre[[1]]) / max(diag(CAMaggregated_pre[[1]]))*20)

E(g2)$weight = sapply(E(g2), function(e) {
  length(all_shortest_paths(g, from=ends(g2, e)[1], to=ends(g2, e)[2])$res) } )
E(g2)$weight = E(g2)$weight / 2
# E(g2)$weight[E(g2)$weight == 1] <- NA

V(g2)$color[V(g2)$value <= .5 & V(g2)$value >= -.5] <- "yellow"

V(g2)$shape <- NA
V(g2)$shape <- ifelse(test = V(g2)$color == "yellow", yes = "square", no = "circle")



### > plot multiple times because of random layout
for(i in 1:5){
  plot(g2, edge.arrow.size = 0,
       layout=layout_nicely, vertex.frame.color="black", asp = .5, margin = -0.1,
       vertex.size=diag(CAMaggregated_pre[[1]]) / max(diag(CAMaggregated_pre[[1]]))*10,
       vertex.label.cex = .9,
       edge.weight=2, edge.width=(E(g2)$weight/8))
}

for social assistance robot post

sel_ids <- questionnaireCAMs$PROLIFIC_PID[questionnaireCAMs$choosen_Robot == "sozialer Assistenzroboter"]

tmp_nodes <- nodes_post

tmp_nodes$text_summarized <- str_remove(string = tmp_nodes$text_summarized, pattern = "_positive$|_negative$|_neutral$|_ambivalent$")
tmp_nodes$text_summarized <- str_trim(string = tmp_nodes$text_summarized)

CAMaggregated_post <- aggregate_CAMs(dat_merged = merged_post, dat_nodes = tmp_nodes,
                                ids_CAMs = sel_ids)
[1] "aggregate_CAMs: using participant CAM ids"
processing 99 CAMs... 
[1] "provided participantCAM ID in drawnCAM"
[1] "text_summarized column identified"
g = CAMaggregated_post[[2]]
g2 = simplify(CAMaggregated_post[[2]])
# plot(g2, edge.arrow.size=0.01,
#      vertex.size=diag(CAMaggregated_post[[1]]) / max(diag(CAMaggregated_post[[1]]))*20)

E(g2)$weight = sapply(E(g2), function(e) {
  length(all_shortest_paths(g, from=ends(g2, e)[1], to=ends(g2, e)[2])$res) } )
E(g2)$weight = E(g2)$weight / 2
# E(g2)$weight[E(g2)$weight == 1] <- NA

V(g2)$color[V(g2)$value <= .5 & V(g2)$value >= -.5] <- "yellow"

V(g2)$shape <- NA
V(g2)$shape <- ifelse(test = V(g2)$color == "yellow", yes = "square", no = "circle")



### > plot multiple times because of random layout
for(i in 1:5){
  plot(g2, edge.arrow.size = 0,
       layout=layout_nicely, vertex.frame.color="black", asp = .5, margin = -0.1,
       vertex.size=diag(CAMaggregated_post[[1]]) / max(diag(CAMaggregated_post[[1]]))*10,
       vertex.label.cex = .9,
       edge.weight=2, edge.width=(E(g2)$weight/8))
}

Differences of adjacency matrices:

data.frame(post = diag(CAMaggregated_post[[1]]), 
           pre = diag(CAMaggregated_pre[[1]]), 
           differences = diag(CAMaggregated_post[[1]]) - diag(CAMaggregated_pre[[1]]))
                         post pre differences
Soziale Assistenzroboter   99  99           0
Nachteile                  99  99           0
Vorteile                   99  99           0
RCPP                       54  39          15
LC                         37  35           2
T                          43  42           1
SIP                        92  90           2
HRIP                       78  67          11
AN                         50  43           7
SIN                       119 112           7
R                          33  26           7
HC                         37  36           1
AP                         25  45         -20
HRIN                       77  22          55
RCN                        54  64         -10
SA                         40  36           4
TP                         63  35          28
TL                         35  59         -24
RCPN                       41  37           4
RCA                        42  39           3
MT                         46  10          36
# CAMaggregated_post[[1]] - CAMaggregated_pre[[1]]
# sort(diag(CAMaggregated_post[[1]]) - diag(CAMaggregated_pre[[1]]))

check for differences (participants with change types B, D)

dat_ids <- questionnaireCAMs[, c("PROLIFIC_PID", "choosen_Robot", "typeChange", "CAMpre", "CAMpost")]
dat_ids <- dat_ids[dat_ids$typeChange %in% c("B", "D"),]
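
As a quick check of how many participants fall into the retained change types, the overall distribution can be tabulated (a sketch):

# table(questionnaireCAMs$typeChange)
# nrow(dat_ids)  # number of participants with change type B or D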

open text answers (adaptive question)

Question: Ihre angepasste Mind-Map hatte eine durchschnittliche emotionale Bewertung von XXX, diese war im Vergleich zu ihrer anfangs gezeichneten Mind-Map (durchschnittliche emotionale Bewertung von XXX) XXX. Bitte erklären Sie, warum Sie diese XXX wahrgenommen haben: (English: Your adapted mind map had an average emotional evaluation of XXX, which, compared to your initially drawn mind map (average emotional evaluation of XXX), was XXX. Please explain why you perceived this XXX:)

questionnaireCAMs$meanDifferencesCAMs <- round(x = questionnaireCAMs$mean_valence_macro_post - questionnaireCAMs$mean_valence_macro_pre, digits = 2)
DT::datatable(questionnaireCAMs[,c("meanDifferencesCAMs", "adaptiveQuestion", "feedback_critic")], options = list(pageLength = 5)) 
hist(questionnaireCAMs$meanDifferencesCAMs)

summary(questionnaireCAMs$meanDifferencesCAMs)
    Min.  1st Qu.   Median     Mean  3rd Qu.     Max. 
-0.42000 -0.05000  0.01500  0.05352  0.15000  1.21000 

Only for people who have added new concepts (types B, D)

tmp <- questionnaireCAMs[questionnaireCAMs$PROLIFIC_PID %in% dat_ids$PROLIFIC_PID, ]


DT::datatable(tmp[,c("meanDifferencesCAMs", "adaptiveQuestion", "feedback_critic")], options = list(pageLength = 5)) 
hist(tmp$meanDifferencesCAMs)

summary(tmp$meanDifferencesCAMs)
    Min.  1st Qu.   Median     Mean  3rd Qu.     Max. 
-0.42000 -0.08000  0.05000  0.06128  0.18500  1.21000 
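
Whether these post-pre shifts differ from zero within the B/D subgroup could be tested directly; a sketch using a one-sample t-test on the difference scores:

# t.test(tmp$meanDifferencesCAMs, mu = 0)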

check for words

!!! in progress

list_comments <- list()
list_words <- list()
list_typeRobot <- list()
list_value <- list()


for(i in 1:nrow(dat_ids)){
  tmp_pre <- CAMfiles_combined[[1]][CAMfiles_combined[[1]]$participantCAM %in% dat_ids$PROLIFIC_PID[i] & CAMfiles_combined[[1]]$CAM %in% dat_ids$CAMpre[i],]
  tmp_post <- CAMfiles_combined[[1]][CAMfiles_combined[[1]]$participantCAM %in% dat_ids$PROLIFIC_PID[i] & CAMfiles_combined[[1]]$CAM %in% dat_ids$CAMpost[i],]

  ## concepts in the post CAM whose x/y coordinates do not occur in the pre CAM are treated as newly added
  list_comments[[i]] <- tmp_post$comment[!(tmp_post$x_pos %in% tmp_pre$x_pos & tmp_post$y_pos %in% tmp_pre$y_pos)]
  list_words[[i]] <- tmp_post$text_summarized[!(tmp_post$x_pos %in% tmp_pre$x_pos & tmp_post$y_pos %in% tmp_pre$y_pos)]
  list_typeRobot[[i]] <- rep(x = dat_ids$choosen_Robot[i], times = length(list_words[[i]]))
  list_value[[i]] <- tmp_post$value[!(tmp_post$x_pos %in% tmp_pre$x_pos & tmp_post$y_pos %in% tmp_pre$y_pos)]
}


barplot(table(unlist(list_value)))

# recode ambivalent concepts (value coded as 10) to 0 before summarising
tmp <- ifelse(test = unlist(list_value) == 10, yes = 0, no = unlist(list_value))
summary(tmp)
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
-3.0000 -2.0000  0.0000  0.2621  2.0000  3.0000 
summary(tmp[unlist(list_typeRobot) == "Rettungsroboter"])
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
-3.0000 -1.0000  1.0000  0.4439  2.0000  3.0000 
summary(tmp[unlist(list_typeRobot) == "sozialer Assistenzroboter"])
    Min.  1st Qu.   Median     Mean  3rd Qu.     Max. 
-3.00000 -2.00000  0.00000  0.06477  2.00000  3.00000 
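
Whether the valence of the newly added concepts differs between the two robot groups could be tested non-parametrically; a sketch using the recoded values from above:

# robot_group <- unlist(list_typeRobot)
# wilcox.test(tmp ~ robot_group)
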
table(unlist(list_words)[unlist(list_typeRobot) == "Rettungsroboter"])

  AN_ambivalent     AN_negative      AP_neutral     AP_positive     HC_negative 
              1               8               1               7              12 
HRIN_ambivalent   HRIN_negative   HRIP_positive     LC_positive     MT_negative 
              1               5               5               2              29 
    MT_positive    R_ambivalent      R_negative       R_neutral  RCA_ambivalent 
              1               1              46               1               7 
   RCA_negative     RCA_neutral    RCA_positive  RCN_ambivalent    RCN_negative 
              4               1               1               3               6 
    RCN_neutral    RCN_positive   RCPN_negative   RCPP_positive   SA_ambivalent 
             15               1              18              31               3 
    SA_negative      SA_neutral     SA_positive    SIN_negative  SIP_ambivalent 
              1               1              66               2               1 
   SIP_positive      T_positive   TL_ambivalent     TL_negative      TL_neutral 
             10              10               3              28               1 
  TP_ambivalent     TP_negative     TP_positive 
              1               1              84 
table(unlist(list_words)[unlist(list_typeRobot) == "sozialer Assistenzroboter"])

  AN_ambivalent     AN_negative     AP_positive     HC_negative HRIN_ambivalent 
              1              21              20              10               3 
  HRIN_negative    HRIN_neutral   HRIN_positive    HRIP_neutral   HRIP_positive 
             41               1               2               2              33 
    LC_positive     MT_negative      MT_neutral    R_ambivalent      R_negative 
              4              17               1               1              11 
 RCA_ambivalent    RCA_negative     RCA_neutral  RCN_ambivalent    RCN_negative 
             10               3               1               7               3 
    RCN_neutral   RCPN_negative    RCPN_neutral   RCPP_positive      SA_neutral 
              9              14               1              41               1 
    SA_positive    SIN_negative    SIP_positive       T_neutral      T_positive 
             26              39              25               1               8 
  TL_ambivalent     TL_negative      TL_neutral     TL_positive   TP_ambivalent 
              1               9               2               1               1 
    TP_positive 
             15 
DT::datatable(cbind(unlist(list_typeRobot),unlist(list_words), unlist(list_comments), unlist(list_value)), options = list(pageLength = 5)) 

Gender / Age / Trust differences

Create new variables

questionnaireCAMs$choosen_Robot_Sex <- paste0(questionnaireCAMs$choosen_Robot, "_", questionnaireCAMs$socio_sex)
questionnaireCAMs$socio_age_dummy <- ifelse(test = questionnaireCAMs$socio_age <= 25, yes = "young", no = ifelse(test = questionnaireCAMs$socio_age >= 40, yes = "old", no = "medium"))
table(questionnaireCAMs$socio_age_dummy)

medium    old  young 
   124     24     67 

Check for mean differences between robot × sex groups

## mean_AlmereAnxiety
tmp <- aov(formula = mean_AlmereAnxiety ~ choosen_Robot_Sex, data = questionnaireCAMs)
if(summary(tmp)[[1]][["Pr(>F)"]][1] < .05){
  ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot_Sex,
  y = mean_AlmereAnxiety
)
}
Warning in min(x): no non-missing arguments to min; returning Inf
Warning in max(x): no non-missing arguments to max; returning -Inf

## mean_AlmereAttitude
tmp <- aov(formula = mean_AlmereAttitude ~ choosen_Robot_Sex, data = questionnaireCAMs)
if(summary(tmp)[[1]][["Pr(>F)"]][1] < .05){
  ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot_Sex,
  y = mean_AlmereAttitude
)
}
Warning in min(x): no non-missing arguments to min; returning Inf
Warning in max(x): no non-missing arguments to max; returning -Inf

## mean_LiWangAnthropomorphism
tmp <- aov(formula = mean_LiWangAnthropomorphism ~ choosen_Robot_Sex, data = questionnaireCAMs)
if(summary(tmp)[[1]][["Pr(>F)"]][1] < .05){
  ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot_Sex,
  y = mean_LiWangAnthropomorphism
)
}

## mean_LiWangAutonomy
tmp <- aov(formula = mean_LiWangAutonomy ~ choosen_Robot_Sex, data = questionnaireCAMs)
if(summary(tmp)[[1]][["Pr(>F)"]][1] < .05){
  ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot_Sex,
  y = mean_LiWangAutonomy
)
}

## mean_GAToRSpp
tmp <- aov(formula = mean_GAToRSpp ~ choosen_Robot_Sex, data = questionnaireCAMs)
if(summary(tmp)[[1]][["Pr(>F)"]][1] < .05){
  ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot_Sex,
  y = mean_GAToRSpp
)
}

## mean_GAToRSpn
tmp <- aov(formula = mean_GAToRSpn ~ choosen_Robot_Sex, data = questionnaireCAMs)
if(summary(tmp)[[1]][["Pr(>F)"]][1] < .05){
  ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot_Sex,
  y = mean_GAToRSpn
)
}

## mean_GAToRSsp
tmp <- aov(formula = mean_GAToRSsp ~ choosen_Robot_Sex, data = questionnaireCAMs)
if(summary(tmp)[[1]][["Pr(>F)"]][1] < .05){
  ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot_Sex,
  y = mean_GAToRSsp
)
}
Warning in min(x): no non-missing arguments to min; returning Inf
Warning in max(x): no non-missing arguments to max; returning -Inf

## mean_GAToRSsn
tmp <- aov(formula = mean_GAToRSsn ~ choosen_Robot_Sex, data = questionnaireCAMs)
if(summary(tmp)[[1]][["Pr(>F)"]][1] < .05){
  ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot_Sex,
  y = mean_GAToRSsn
)
}

Check for mean differences of valence between robot × sex groups

ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot_Sex,
  y = mean_valence_macro_pre
)
Warning in min(x): no non-missing arguments to min; returning Inf
Warning in max(x): no non-missing arguments to max; returning -Inf

ggbetweenstats(
  data = questionnaireCAMs,
  x = choosen_Robot_Sex,
  y = mean_valence_macro_post
)
Warning in min(x): no non-missing arguments to min; returning Inf
Warning in max(x): no non-missing arguments to max; returning -Inf

Qualitative

Quantitative