sjp.lmer

#devtools::install_github("sjPlot/sjmisc")

library(sjPlot)
## Warning: replacing previous import by 'grid::unit' when loading 'sjPlot'
library(lme4)
## Loading required package: Matrix
fit <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
fit
## Linear mixed model fit by REML ['lmerMod']
## Formula: Reaction ~ Days + (Days | Subject)
##    Data: sleepstudy
## REML criterion at convergence: 1743.628
## Random effects:
##  Groups   Name        Std.Dev. Corr
##  Subject  (Intercept) 24.740       
##           Days         5.922   0.07
##  Residual             25.592       
## Number of obs: 180, groups:  Subject, 18
## Fixed Effects:
## (Intercept)         Days  
##      251.41        10.47
sjp.lmer(fit)
## Plotting random effects...

# sort by predictor Days
sjp.lmer(fit, sort.coef = "Days")
## Plotting random effects...

#sjp.lmer(fit, sort.coef = "intercept")
# sort all predictors
sjp.lmer(fit,
         facet.grid = FALSE,
         sort.coef = "sort.all")

sjp.lmer(fit,
         facet.grid = TRUE,
         sort.coef = "sort.all")
## Sorting each group of random intercept ('sort.all') is not possible when 'facet.grid = TRUE'.
## Sorting each group of random intercept ('sort.all') is not possible when 'facet.grid = TRUE'.
## Plotting random effects...
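
# The estimates behind these random-effect plots can be checked directly with
# lme4 (a quick sanity check, not part of sjPlot):
ranef(fit)   # per-subject deviations from the fixed intercept and slope
coef(fit)    # fixed effects plus deviations, i.e. the subject-specific lines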

sjp.lmer

# For the next examples, we fit another model.
library(lme4)
# for sample data and tool functions
library(sjmisc)
data(efc)
head(efc)
##   c12hour e15relat e16sex e17age e42dep c82cop1 c83cop2 c84cop3 c85cop4
## 1      16        2      2     83      3       3       2       2       2
## 2     148        2      2     88      3       3       3       3       3
## 3      70        1      2     82      3       2       2       1       4
## 4     168        1      2     67      4       4       1       3       1
## 5     168        2      2     84      4       3       2       1       2
## 6      16        2      2     85      4       2       2       3       3
##   c86cop5 c87cop6 c88cop7 c89cop8 c90cop9 c160age c161sex c172code
## 1       1       1       2       3       3      56       2        2
## 2       4       1       3       2       2      54       2        2
## 3       1       1       1       4       3      80       1        1
## 4       1       1       1       2       4      69       1        2
## 5       2       2       1       4       4      47       2        2
## 6       3       2       2       1       1      56       1        2
##   c175empl barthtot neg_c_7 pos_v_4 quol_5 resttotn tot_sc_e n4pstu
## 1        1       75      12      12     14        0        4      0
## 2        1       75      20      11     10        4        0      0
## 3        0       35      11      13      7        0        1      2
## 4        0        0      10      15     12        2        0      3
## 5        0       25      12      15     19        2        1      2
## 6        1       60      19       9      8        1        3      2
##   nur_pst
## 1      NA
## 2      NA
## 3       2
## 4       3
## 5       2
## 6       2
# prepare group variable
efc$grp <- as.factor(efc$e15relat)
levels(x = efc$grp) <- get_val_labels(efc$e15relat)
## Warning: 'get_val_labels' is deprecated.
## Use 'get_labels' instead.
## See help("Deprecated")
head(efc$grp)
## [1] child          child          spouse/partner spouse/partner
## [5] child          child         
## 8 Levels: spouse/partner child sibling ... other, specify
# data frame for fitted model
mydf <- data.frame(neg_c_7 = as.numeric(efc$neg_c_7),
                   sex = as.factor(efc$c161sex),
                   c12hour = as.numeric(efc$c12hour),
                   barthel = as.numeric(efc$barthtot),
                   grp = efc$grp)
head(mydf)
##   neg_c_7 sex c12hour barthel            grp
## 1      12   2      16      75          child
## 2      20   2     148      75          child
## 3      11   1      70      35 spouse/partner
## 4      10   1     168       0 spouse/partner
## 5      12   2     168      25          child
## 6      19   1      16      60          child
# fit linear mixed model
fit2 <- lmer(neg_c_7 ~ sex + c12hour +
               barthel + (1 | grp), data = mydf)
# With the 'type' argument you can decide which effects to plot; type = "fe" plots the fixed effects.
# plot fixed effects
sjp.lmer(fit2, type = "fe")
## Computing approximate p-values via Wald chi-squared test...

# plot standardized fixed effects
sjp.lmer(fit2, type = "fe.std")
## Computing approximate p-values via Wald chi-squared test...

#Plotting slopes of fixed effects
# plot fixed effects slopes
sjp.lmer(fit2, type = "fe.pred")

#Plotting fixed effects slopes for each random intercept (group levels)

# random intercepts
ranef(fit2)
## $grp
##                         (Intercept)
## spouse/partner           0.62318882
## child                    0.42390010
## sibling                 -0.05435509
## daughter or son -in-law  0.05759517
## ancle/aunt              -0.10111187
## nephew/niece            -0.55060421
## cousin                  -0.11383605
## other, specify          -0.28477688
# fixed effects
fixef(fit2)
##  (Intercept)         sex2      c12hour      barthel 
## 14.135947298  0.478500191  0.003365667 -0.047946653
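
# For reference: what type = "fe.ri" visualizes are the group-specific
# regression lines, i.e. the fixed effects combined with each group's random
# intercept. A minimal check using lme4 only:
coef(fit2)$grp   # per-group intercepts (fixed + random) and the shared slopes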
# plot fixed effects depending on group levels
sjp.lmer(fit2, type = "fe.ri")

# plot fixed effects depending on group levels
# emphasize group levels 1, 2 and 5
sjp.lmer(fit2, 
         type = "fe.ri", 
         emph.grp = c(1, 2, 5), 
         facet.grid = FALSE)

sjp.lmer(fit2, 
         type = "fe.ri", 
         emph.grp = c(1, 2, 5), 
         facet.grid = TRUE)
## Emphasizing groups only works in non-faceted plots. Use 'facet.grid = FALSE' to enable group emphasizing. 'emph.grp' was set to NULL.

#Grouping levels can also be specified via their names.
# plot fixed effects depending on group levels
# emphasize groups child and cousin
# only for predictor barthel
sjp.lmer(fit2, 
         type = "fe.ri", 
         emph.grp = c("child", "cousin"), 
         facet.grid = FALSE,
         vars = "barthel")

# plot fixed effect "c12hour" only,
# depending on group levels
sjp.lmer(fit2, 
         type = "fe.ri",
         vars = "c12hour")

# plot fixed effects correlation matrix
sjp.lmer(fit2, type = "fe.cor")
## Computing correlation using spearman-method with listwise-deletion...
## Warning: Removed 6 rows containing missing values (geom_point).

# plot qq-plot of random effects
sjp.lmer(fit2, type = "re.qq")
## Testing for normal distribution. Dots should be plotted along the line.
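
# The same check can be reproduced with base R; a minimal sketch using the
# random intercepts extracted via ranef():
ri <- ranef(fit2)$grp[, "(Intercept)"]
qqnorm(ri, main = "Random intercepts of grp")
qqline(ri)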

sjc.qclus

#Examples
## Not run:
# k-means clustering of mtcars-dataset
sjc.qclus(mtcars)
## Clustering Gap statistic ["clusGap"].
## B=100 simulated reference sets, k = 1..10
## 
##  --> Number of clusters (method 'Tibs2001SEmax', SE.factor=1): 2
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.

# k-means clustering of the airquality dataset with 4 pre-defined
# groups in a faceted panel
sjc.qclus(airquality,
          groupcount = 4,
          facetCluster = TRUE)
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.

#sjc.qclus(iris,
#          groupcount = 3,
#          facetCluster = TRUE)

## End(Not run)
# k-means clustering of airquality data
# and saving the results. most likely, 3 cluster
# groups have been found (see below).
airgrp <- sjc.qclus(airquality)
## Clustering Gap statistic ["clusGap"].
## B=100 simulated reference sets, k = 1..10
## 
##  --> Number of clusters (method 'Tibs2001SEmax', SE.factor=1): 3
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.

# "re-plot" cluster groups, without computing

# new k-means cluster analysis.
#sjc.qclus(airquality,
#          groupcount = 3,
#          groups = airgrp$classification)
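
# For reference, a similar cluster solution can be computed by hand with the
# cluster package (which provides the gap statistic). This is a rough sketch
# under the assumption of scaled, complete cases - not sjc.qclus' exact code:
library(cluster)
airq <- na.omit(scale(airquality))
gap <- clusGap(airq, FUNcluster = kmeans, K.max = 10, B = 100)
k <- maxSE(gap$Tab[, "gap"], gap$Tab[, "SE.sim"], method = "Tibs2001SEmax")
table(kmeans(airq, centers = k)$cluster)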

sjp.aov1

#Examples
library(sjmisc)
data(efc)
# note: "grpVar" does not need to be a factor.
# coercion to factor is done by the function
sjp.aov1(efc$c12hour, efc$e42dep)
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.
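
# sjp.aov1 essentially plots the effects of a one-way ANOVA of the dependent
# variable on the group factor; the underlying model can be checked with
# base R (a sketch, not the function's internal code):
summary(lm(c12hour ~ as.factor(e42dep), data = efc))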

data(efc)
head(efc)
##   c12hour e15relat e16sex e17age e42dep c82cop1 c83cop2 c84cop3 c85cop4
## 1      16        2      2     83      3       3       2       2       2
## 2     148        2      2     88      3       3       3       3       3
## 3      70        1      2     82      3       2       2       1       4
## 4     168        1      2     67      4       4       1       3       1
## 5     168        2      2     84      4       3       2       1       2
## 6      16        2      2     85      4       2       2       3       3
##   c86cop5 c87cop6 c88cop7 c89cop8 c90cop9 c160age c161sex c172code
## 1       1       1       2       3       3      56       2        2
## 2       4       1       3       2       2      54       2        2
## 3       1       1       1       4       3      80       1        1
## 4       1       1       1       2       4      69       1        2
## 5       2       2       1       4       4      47       2        2
## 6       3       2       2       1       1      56       1        2
##   c175empl barthtot neg_c_7 pos_v_4 quol_5 resttotn tot_sc_e n4pstu
## 1        1       75      12      12     14        0        4      0
## 2        1       75      20      11     10        4        0      0
## 3        0       35      11      13      7        0        1      2
## 4        0        0      10      15     12        2        0      3
## 5        0       25      12      15     19        2        1      2
## 6        1       60      19       9      8        1        3      2
##   nur_pst
## 1      NA
## 2      NA
## 3       2
## 4       3
## 5       2
## 6       2
(efc.val <- get_labels(efc))
## $c12hour
## NULL
## 
## $e15relat
## [1] "spouse/partner"          "child"                  
## [3] "sibling"                 "daughter or son -in-law"
## [5] "ancle/aunt"              "nephew/niece"           
## [7] "cousin"                  "other, specify"         
## 
## $e16sex
## [1] "male"   "female"
## 
## $e17age
## NULL
## 
## $e42dep
## [1] "independent"          "slightly dependent"   "moderately dependent"
## [4] "severely dependent"  
## 
## $c82cop1
## [1] "never"     "sometimes" "often"     "always"   
## 
## $c83cop2
## [1] "Never"     "Sometimes" "Often"     "Always"   
## 
## $c84cop3
## [1] "Never"     "Sometimes" "Often"     "Always"   
## 
## $c85cop4
## [1] "Never"     "Sometimes" "Often"     "Always"   
## 
## $c86cop5
## [1] "Never"     "Sometimes" "Often"     "Always"   
## 
## $c87cop6
## [1] "Never"     "Sometimes" "Often"     "Always"   
## 
## $c88cop7
## [1] "Never"     "Sometimes" "Often"     "Always"   
## 
## $c89cop8
## [1] "never"     "sometimes" "often"     "always"   
## 
## $c90cop9
## [1] "never"     "sometimes" "often"     "always"   
## 
## $c160age
## NULL
## 
## $c161sex
## [1] "Male"   "Female"
## 
## $c172code
## [1] "low level of education"          "intermediate level of education"
## [3] "high level of education"        
## 
## $c175empl
## [1] "no"  "yes"
## 
## $barthtot
## NULL
## 
## $neg_c_7
## NULL
## 
## $pos_v_4
## NULL
## 
## $quol_5
## NULL
## 
## $resttotn
## NULL
## 
## $tot_sc_e
## NULL
## 
## $n4pstu
## [1] "No Care Level" "Care Level 1"  "Care Level 2"  "Care Level 3" 
## [5] "Care Level 3+"
## 
## $nur_pst
## [1] "Care Level 1"    "Care Level 2"    "Care Level 3/3+"
(efc.var <- get_label(efc))
##                                                                      c12hour 
##                    "average number of hours of care for the elder in a week" 
##                                                                     e15relat 
##                                                      "relationship to elder" 
##                                                                       e16sex 
##                                                             "elder's gender" 
##                                                                       e17age 
##                                                                 "elder' age" 
##                                                                       e42dep 
##               "how dependent is the elder? - subjective perception of carer" 
##                                                                      c82cop1 
##                                    "do you feel you cope well as caregiver?" 
##                                                                      c83cop2 
##                                      "do you find caregiving too demanding?" 
##                                                                      c84cop3 
## "does caregiving cause difficulties in your relationship with your friends?" 
##                                                                      c85cop4 
##              "does caregiving have negative effect on your physical health?" 
##                                                                      c86cop5 
##  "does caregiving cause difficulties in your relationship with your family?" 
##                                                                      c87cop6 
##                              "does caregiving cause financial difficulties?" 
##                                                                      c88cop7 
##                             "do you feel trapped in your role as caregiver?" 
##                                                                      c89cop8 
##                               "do you feel supported by friends/neighbours?" 
##                                                                      c90cop9 
##                                         "do you feel caregiving worthwhile?" 
##                                                                      c160age 
##                                                                 "carer' age" 
##                                                                      c161sex 
##                                                             "carer's gender" 
##                                                                     c172code 
##                  "carer's level of education: recoding of variable c172edu1" 
##                                                                     c175empl 
##                                                "are you currently employed?" 
##                                                                     barthtot 
##                                                  "Total score BARTHEL INDEX" 
##                                                                      neg_c_7 
##                                               "Negative impact with 7 items" 
##                                                                      pos_v_4 
##                                                "Positive value with 4 items" 
##                                                                       quol_5 
##                                                    "Quality of life 5 items" 
##                                                                     resttotn 
##                                                           "Job restrictions" 
##                                                                     tot_sc_e 
##                                                       "Services for elderly" 
##                                                                       n4pstu 
##                                                                 "Care level" 
##                                                                      nur_pst 
##                                                                 "Care level"
sjp.aov1(efc$c12hour,
         as.factor(efc$e42dep),
         axisLabels.y = efc.val['e42dep'],
         axisTitle.x = efc.var[['c12hour']],
         showModelSummary = TRUE)
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.

# -------------------------------------------------
# auto-detection of value labels and variable names
# -------------------------------------------------
sjp.aov1(efc$c12hour,
         efc$e42dep)
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.

# bar-plot, don't use this!
# however, if you dare to, adjust
# 'geom.size'...
sjp.aov1(efc$c12hour,
         efc$c172code,
         axisLabels.y = efc.val['c172code'],
         title = efc.var[['c12hour']],
         type = "bars",
         geom.size = 0.5)
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.

sjp.chi2

#Examples
# create data frame with 5 dichotomous (dummy) variables
mydf <- data.frame(as.factor(sample(1:2, 100, replace=TRUE)),
                   as.factor(sample(1:2, 100, replace=TRUE)),
                   as.factor(sample(1:2, 100, replace=TRUE)),
                   as.factor(sample(1:2, 100, replace=TRUE)),
                   as.factor(sample(1:2, 100, replace=TRUE)))
# create variable labels
items <- list(c("Item 1", "Item 2", "Item 3", "Item 4", "Item 5"))
# plot Chi2-contingency-table
sjp.chi2(mydf, axisLabels = items)
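
# The tiles are based on pairwise chi-squared tests between the items (see the
# function's documentation); a single pair can be checked by hand (a sketch):
chisq.test(table(mydf[[1]], mydf[[2]]))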

sjp.corr

#Examples
# create data frame with 5 random variables
mydf <- data.frame(cbind(runif(10),
                         runif(10),
                         runif(10),
                         runif(10),
                         runif(10)))
# plot correlation matrix using circles
sjp.corr(mydf)
## Computing correlation using spearman-method with listwise-deletion...
## Warning: Removed 10 rows containing missing values (geom_point).

# plot correlation matrix using square tiles without diagram background
sjp.corr(mydf, type = "tile")
## Computing correlation using spearman-method with listwise-deletion...
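
# The correlation coefficients themselves can be verified with base R; a
# minimal equivalent of the spearman computation with listwise deletion:
round(cor(mydf, method = "spearman", use = "complete.obs"), 2)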

# -------------------------------
# Data from the EUROFAMCARE sample dataset
# -------------------------------
library(sjmisc)
data(efc)
# retrieve variable labels
varlabs <- get_label(efc)
# create data frame
vars.index <- c(1, 4, 15, 19, 20, 21, 22, 24, 25)
mydf <- data.frame(efc[, vars.index])
head(mydf)
##   c12hour e17age c160age barthtot neg_c_7 pos_v_4 quol_5 tot_sc_e n4pstu
## 1      16     83      56       75      12      12     14        4      0
## 2     148     88      54       75      20      11     10        0      0
## 3      70     82      80       35      11      13      7        1      2
## 4     168     67      69        0      10      15     12        0      3
## 5     168     84      47       25      12      15     19        1      2
## 6      16     85      56       60      19       9      8        3      2
colnames(mydf) <- varlabs[vars.index]
# show legend
sjp.corr(mydf, type = "tile", hideLegend = FALSE)
## Warning in cor.test.default(df[[i]], df[[j]], alternative = "two.sided", :
## Cannot compute exact p-value with ties
## ... (warning repeated for each variable pair)
## Computing correlation using spearman-method with listwise-deletion...

# -------------------------------
# auto-detection of labels
# -------------------------------
# blank theme
#sjp.setTheme(theme = "blank", axis.angle.x = 90)
sjp.corr(efc[, vars.index])
## Warning in cor.test.default(df[[i]], df[[j]], alternative = "two.sided", :
## Cannot compute exact p-value with ties
## ... (warning repeated for each variable pair)
## Computing correlation using spearman-method with listwise-deletion...
## Warning: Removed 36 rows containing missing values (geom_point).

sjt.df

data(efc)
head(efc)
##   c12hour e15relat e16sex e17age e42dep c82cop1 c83cop2 c84cop3 c85cop4
## 1      16        2      2     83      3       3       2       2       2
## 2     148        2      2     88      3       3       3       3       3
## 3      70        1      2     82      3       2       2       1       4
## 4     168        1      2     67      4       4       1       3       1
## 5     168        2      2     84      4       3       2       1       2
## 6      16        2      2     85      4       2       2       3       3
##   c86cop5 c87cop6 c88cop7 c89cop8 c90cop9 c160age c161sex c172code
## 1       1       1       2       3       3      56       2        2
## 2       4       1       3       2       2      54       2        2
## 3       1       1       1       4       3      80       1        1
## 4       1       1       1       2       4      69       1        2
## 5       2       2       1       4       4      47       2        2
## 6       3       2       2       1       1      56       1        2
##   c175empl barthtot neg_c_7 pos_v_4 quol_5 resttotn tot_sc_e n4pstu
## 1        1       75      12      12     14        0        4      0
## 2        1       75      20      11     10        4        0      0
## 3        0       35      11      13      7        0        1      2
## 4        0        0      10      15     12        2        0      3
## 5        0       25      12      15     19        2        1      2
## 6        1       60      19       9      8        1        3      2
##   nur_pst
## 1      NA
## 2      NA
## 3       2
## 4       3
## 5       2
## 6       2
#sji.viewSPSS(efc)

# Description and content of data frames
sjt.df(efc)

sjp.glm

#Examples
# prepare dichotomous dependent variable
data(swiss)
head(swiss)
##              Fertility Agriculture Examination Education Catholic
## Courtelary        80.2        17.0          15        12     9.96
## Delemont          83.1        45.1           6         9    84.84
## Franches-Mnt      92.5        39.7           5         5    93.40
## Moutier           85.8        36.5          12         7    33.77
## Neuveville        76.9        43.5          17        15     5.16
## Porrentruy        76.1        35.3           9         7    90.57
##              Infant.Mortality
## Courtelary               22.2
## Delemont                 22.2
## Franches-Mnt             20.2
## Moutier                  20.3
## Neuveville               20.6
## Porrentruy               26.6
y <- ifelse(swiss$Fertility < median(swiss$Fertility), 0, 1)
# fit model
fitOR <- glm(y ~ swiss$Education + swiss$Examination + 
               swiss$Infant.Mortality + swiss$Catholic,
             family = binomial(link = "logit"))
# print Odds Ratios as dots
sjp.glm(fitOR)
## Waiting for profiling to be done...

# print Odds Ratios as bars
sjp.glm(fitOR, type = "bars", geom.size = .3)
## Waiting for profiling to be done...
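
# The plotted odds ratios and confidence intervals can be reproduced with
# base R; confint() profiles the likelihood, hence the message above:
exp(cbind(OR = coef(fitOR), confint(fitOR)))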

# -------------------------------
# Predictors for negative impact
# of care. Data from the EUROFAMCARE
# sample dataset
# -------------------------------
library(sjmisc)
data(efc)
# retrieve predictor variable labels
labs <- get_label(efc)
predlab <- c(labs[['c161sex']],
             paste0(labs[['e42dep']], " (slightly)"),
             paste0(labs[['e42dep']], " (moderate)"),
             paste0(labs[['e42dep']], " (severely)"),
             labs[['barthtot']],
             paste0(labs[['c172code']], " (mid)"),
             paste0(labs[['c172code']], " (high)"))
# create binary response
y <- ifelse(efc$neg_c_7 < median(na.omit(efc$neg_c_7)), 0, 1)
# create dummy variables for educational status
edu.mid <- ifelse(efc$c172code == 2, 1, 0)
edu.high <- ifelse(efc$c172code == 3, 1, 0)
# create data frame for fitted model
mydf <- data.frame(y = as.factor(y),
                   sex = as.factor(efc$c161sex),
                   dep = as.factor(efc$e42dep),
                   barthel = as.numeric(efc$barthtot),
                   edu.mid = as.factor(edu.mid),
                   edu.hi = as.factor(edu.high))
# fit model
fit <- glm(y ~ ., data = mydf, family = binomial(link = "logit"))
# plot odds
sjp.glm(fit,
        title = labs[['neg_c_7']],
        axisLabels.y = predlab)
## Waiting for profiling to be done...

# plot probability curves (predicted probabilities)
# of coefficients
#sjp.glm(fit,
#        title = labs[['neg_c_7']],
#        axisLabels.y = predlab,
#        type = "prob")
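
# The probability curves of type = "prob" can be approximated manually by
# predicting over a grid of one predictor while holding the others fixed.
# A rough sketch for the Barthel index (not the function's exact computation;
# the reference values chosen here are arbitrary):
nd <- data.frame(sex = factor(1, levels = levels(mydf$sex)),
                 dep = factor(3, levels = levels(mydf$dep)),
                 barthel = seq(0, 100, by = 5),
                 edu.mid = factor(0, levels = levels(mydf$edu.mid)),
                 edu.hi = factor(0, levels = levels(mydf$edu.hi)))
plot(nd$barthel, predict(fit, newdata = nd, type = "response"),
     type = "l", xlab = "Barthel index", ylab = "predicted probability")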

sjp.glmer

#Examples
library(lme4)
library(sjmisc)
# create binary response
sleepstudy$Reaction.dicho <- dicho(sleepstudy$Reaction, dich.by = "md")
# fit model
fit <- glmer(Reaction.dicho ~ Days + (Days | Subject),
             sleepstudy,
             family = binomial("logit"))

# simple plot
sjp.glmer(fit)
## Plotting random effects...

# sort by predictor Days
sjp.glmer(fit, sort.coef = "Days")
## Plotting random effects...

data(efc)
# create binary response
efc$hi_qol <- dicho(efc$quol_5)
# prepare group variable
efc$grp <- as.factor(efc$e15relat)
levels(x = efc$grp) <- get_labels(efc$e15relat)
# data frame for fitted model
mydf <- data.frame(hi_qol = as.factor(efc$hi_qol),
                   sex = as.factor(efc$c161sex),
                   c12hour = as.numeric(efc$c12hour),
                   neg_c_7 = as.numeric(efc$neg_c_7),
                   grp = efc$grp)
# fit glmer
fit <- glmer(hi_qol ~ sex + c12hour + neg_c_7 + (1|grp),
             data = mydf,
             family = binomial("logit"))
# plot and sort fixed effects
sjp.glmer(fit,
          type = "fe",
          sort.coef = TRUE)
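
# The fixed-effect odds ratios shown with type = "fe" can be checked against
# lme4 directly; Wald intervals are a quick, approximate alternative to
# profiling (a sketch):
exp(cbind(OR = fixef(fit), confint(fit, parm = "beta_", method = "Wald")))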

# plot probability curves (predicted probabilities)
# for each covariate, grouped by random intercepts
# in integrated plots, emphasizing groups 1 and 4

#sjp.glmer(fit,
#          type = "ri.pc",
#          emph.grp = c(1, 4),
#          facet.grid = FALSE)

# plot probability curve (predicted probabilities)
# of fixed effect, only for coefficient "neg_c_7"

#sjp.glmer(fit,
#          type = "fe.pc",
#          vars = "neg_c_7")

sjp.glmm

#Examples
# prepare dummy variables for binary logistic regression
y1 <- ifelse(swiss$Fertility < median(swiss$Fertility), 0, 1)
y2 <- ifelse(swiss$Infant.Mortality < median(swiss$Infant.Mortality), 0, 1)
y3 <- ifelse(swiss$Agriculture < median(swiss$Agriculture), 0, 1)
# Now fit the models. Note that all models share the same predictors
# and only differ in their dependent variable (y1, y2 and y3)
fitOR1 <- glm(y1 ~ swiss$Education + swiss$Examination + swiss$Catholic,
              family = binomial(link = "logit"))
fitOR2 <- glm(y2 ~ swiss$Education + swiss$Examination + swiss$Catholic,
              family = binomial(link = "logit"))
fitOR3 <- glm(y3 ~ swiss$Education + swiss$Examination + swiss$Catholic,
              family = binomial(link = "logit"))
# plot multiple models
sjp.glmm(fitOR1, fitOR2, fitOR3, facet.grid = TRUE)
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.
## Warning: position_dodge requires non-overlapping x intervals
## Warning: position_dodge requires non-overlapping x intervals
## Warning: position_dodge requires non-overlapping x intervals
## Warning: position_dodge requires non-overlapping x intervals
## Warning: position_dodge requires non-overlapping x intervals
## Warning: position_dodge requires non-overlapping x intervals

# plot multiple models with legend labels and point shapes instead of value labels
sjp.glmm(fitOR1, fitOR2, fitOR3,
         labelDependentVariables = c("Fertility",
                                     "Infant Mortality",
                                     "Agriculture"),
         showValueLabels = FALSE,
         showPValueLabels = FALSE,
         fade.ns = TRUE,
         usePShapes = TRUE)
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.
## Warning: position_dodge requires non-overlapping x intervals
## Warning: position_dodge requires non-overlapping x intervals

# plot multiple models passed as a list argument
all.models <- list()
all.models[[1]] <- fitOR1
all.models[[2]] <- fitOR2
all.models[[3]] <- fitOR3
sjp.glmm(all.models)
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.
## Warning: position_dodge requires non-overlapping x intervals
## Warning: position_dodge requires non-overlapping x intervals

# -------------------------------
# Predictors for negative impact
# of care. Data from the EUROFAMCARE
# sample dataset
# -------------------------------
library(sjmisc)
data(efc)
# create binary response
y <- ifelse(efc$neg_c_7 < median(na.omit(efc$neg_c_7)), 0, 1)
# create dummy variables for educational status
edu.mid <- ifelse(efc$c172code == 2, 1, 0)
edu.high <- ifelse(efc$c172code == 3, 1, 0)
# create data frame for fitted model
mydat <- data.frame(y = as.factor(y),
                    sex = as.factor(efc$c161sex),
                    dep = as.factor(efc$e42dep),
                    barthel = as.numeric(efc$barthtot),
                    edu.mid = as.factor(edu.mid),
                    edu.hi = as.factor(edu.high))
fit1 <- glm(y ~ sex + edu.mid + edu.hi,
            data = mydat,
            family = binomial(link = "logit"))
fit2 <- update(fit1, . ~ . + barthel)
fit3 <- update(fit2, . ~ . + dep)
sjp.glmm(fit1, fit2, fit3)
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.
## Warning: position_dodge requires non-overlapping x intervals
## Warning: position_dodge requires non-overlapping x intervals
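
# A quick tabular check of how the odds ratios change across the three nested
# models (note that, due to missing values, the models are not necessarily
# fitted to identical cases):
lapply(list(fit1, fit2, fit3), function(m) round(exp(coef(m)), 2))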

sjp.gpt

#Examples
library(sjmisc)
data(efc)
# the proportion of dependency levels in female
# elderly, for each family carer's relationship
# to elderly
sjp.gpt(efc$e42dep, efc$e16sex, efc$e15relat)

# proportion of educational levels in highest
# dependency category of elderly, for different
# care levels
sjp.gpt(efc$c172code, efc$e42dep, efc$n4pstu)
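
# The underlying proportions can be inspected with a base R cross-tabulation;
# a rough sketch (not necessarily the exact quantities the plot displays):
round(prop.table(table(efc$e42dep, efc$e15relat), margin = 2), 2)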

sjp.grpfrq

#Examples
# histogram with EUROFAMCARE sample dataset
library(sjmisc)
data(efc)
efc.val <- get_labels(efc)
efc.var <- get_label(efc)
sjp.grpfrq(efc$e17age,
           efc$e16sex,
           title = efc.var['e17age'],
           type = "hist",
           showValueLabels = FALSE,
           showMeanIntercept = TRUE)
# boxplot
sjp.grpfrq(efc$e17age,
           efc$e42dep,
           type = "box")
# -------------------------------------------------
# auto-detection of value labels and variable names
# -------------------------------------------------
# grouped bars using necessary y-limit
sjp.grpfrq(efc$e42dep,
           efc$e16sex,
           title = NULL)
# box plots with interaction variable
sjp.grpfrq(efc$e17age,
           efc$e42dep,
           interactionVar = efc$e16sex,
           title = paste(efc.var['e17age'],
                         "by",
                         efc.var['e42dep'],
                         "and",
                         efc.var['e16sex']),
           axisLabels.x = efc.val[['e17age']],
           interactionVarLabels = efc.val[['e16sex']],
           legendTitle = efc.var['e42dep'],
           legendLabels = efc.val[['e42dep']],
           type = "box")
# Grouped bar plot ranging from 1 to 28 (though scale starts with 7)
sjp.grpfrq(efc$neg_c_7,
           efc$e42dep,
           showValueLabels = FALSE,
           startAxisAt = 1)
# Same grouped bar plot ranging from 7 to 28
sjp.grpfrq(efc$neg_c_7,
           efc$e42dep,
           showValueLabels = FALSE)

sjp.int

#Examples
# Note that the data sets used in this example may not be perfectly suitable
# for fitting linear models; they are only used here because they ship with R.
# fit "dummy" model. Note that the moderator should enter the model first,
# followed by the predictor. Otherwise, use the "swapPredictors" argument to
# swap the predictor on the x-axis with the moderator.
fit <- lm(weight ~ Diet * Time, data = ChickWeight)
# show summary to see significant interactions
summary(fit)
## 
## Call:
## lm(formula = weight ~ Diet * Time, data = ChickWeight)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -135.425  -13.757   -1.311   11.069  130.391 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  30.9310     4.2468   7.283 1.09e-12 ***
## Diet2        -2.2974     7.2672  -0.316  0.75202    
## Diet3       -12.6807     7.2672  -1.745  0.08154 .  
## Diet4        -0.1389     7.2865  -0.019  0.98480    
## Time          6.8418     0.3408  20.076  < 2e-16 ***
## Diet2:Time    1.7673     0.5717   3.092  0.00209 ** 
## Diet3:Time    4.5811     0.5717   8.014 6.33e-15 ***
## Diet4:Time    2.8726     0.5781   4.969 8.92e-07 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 34.07 on 570 degrees of freedom
## Multiple R-squared:  0.773,  Adjusted R-squared:  0.7702 
## F-statistic: 277.3 on 7 and 570 DF,  p-value: < 2.2e-16
# plot regression lines of interaction terms, including value labels
sjp.int(fit, type = "eff", showValueLabels = TRUE)
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.
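
# The interaction lines can also be reproduced with predict() over a grid of
# Time values for each Diet; a minimal base R sketch:
nd <- expand.grid(Diet = levels(ChickWeight$Diet), Time = 0:21)
nd$weight <- predict(fit, newdata = nd)
head(nd)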

# load sample data set
library(sjmisc)
data(efc)
# create data frame with variables that should be included
# in the model
mydf <- data.frame(usage = efc$tot_sc_e,
                   sex = efc$c161sex,
                   education = efc$c172code,
                   burden = efc$neg_c_7,
                   dependency = efc$e42dep)
# convert gender predictor to factor
mydf$sex <- relevel(factor(mydf$sex), ref = "2")
# fit "dummy" model
fit <- lm(usage ~ .*., data = mydf)
summary(fit)
## 
## Call:
## lm(formula = usage ~ . * ., data = mydf)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.4070 -0.8149 -0.2876  0.3931  8.2089 
## 
## Coefficients:
##                      Estimate Std. Error t value Pr(>|t|)  
## (Intercept)           1.75061    0.71461   2.450   0.0145 *
## sex1                 -0.09252    0.47518  -0.195   0.8457  
## education            -0.30402    0.27913  -1.089   0.2764  
## burden               -0.08038    0.05468  -1.470   0.1419  
## dependency           -0.45307    0.21478  -2.109   0.0352 *
## sex1:education       -0.04657    0.15463  -0.301   0.7634  
## sex1:burden          -0.02251    0.02785  -0.808   0.4192  
## sex1:dependency       0.16118    0.11559   1.394   0.1636  
## education:burden      0.01110    0.01773   0.626   0.5316  
## education:dependency  0.13983    0.08095   1.727   0.0845 .
## burden:dependency     0.02880    0.01245   2.314   0.0209 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.221 on 822 degrees of freedom
##   (75 observations deleted due to missingness)
## Multiple R-squared:  0.06001,    Adjusted R-squared:  0.04858 
## F-statistic: 5.248 on 10 and 822 DF,  p-value: 1.661e-07
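# A note on the formula shorthand above: "usage ~ .*." expands to all main
# effects plus all two-way interactions of the remaining columns. An
# equivalent, more explicit formulation would be the following sketch
# ("fit2" is only for illustration and is not used below):
fit2 <- lm(usage ~ (sex + education + burden + dependency)^2, data = mydf)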
# plot interactions. Note that type = "cond" only considers
# significant interactions by default; use "plevel" to
# adjust the p-level sensitivity.
sjp.int(fit, type = "cond")
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.

# plot only selected interaction term for
# type = "eff"
sjp.int(fit, type = "eff", int.term = "sex*education")

# plot interactions, using mean and sd as moderator
# values to calculate interaction effect
sjp.int(fit, type = "eff", moderatorValues = "meansd")

sjp.int(fit, type = "cond", moderatorValues = "meansd")
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.

# plot interactions, including those with p-value up to 0.1
sjp.int(fit,
        type = "cond",
        plevel = 0.1,
        showInterceptLines = TRUE)
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.

# -------------------------------
# Plot estimated marginal means
# -------------------------------
# load sample data set
library(sjmisc)
data(efc)
# create data frame with variables that should be included
# in the model
mydf <- data.frame(burden = efc$neg_c_7,
                   sex = efc$c161sex,
                   education = efc$c172code)
# convert gender predictor to factor
mydf$sex <- factor(mydf$sex)
mydf$education <- factor(mydf$education)
# name factor levels and dependent variable
levels(mydf$sex) <- c("female", "male")
levels(mydf$education) <- c("low", "mid", "high")
mydf$burden <- set_label(mydf$burden, "care burden")
# fit "dummy" model
fit <- lm(burden ~ .*., data = mydf)
summary(fit)
## 
## Call:
## lm(formula = burden ~ . * ., data = mydf)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -5.6321 -2.8491 -0.8491  2.1509 16.1509 
## 
## Coefficients:
##                       Estimate Std. Error t value Pr(>|t|)    
## (Intercept)            11.0244     0.6068  18.169   <2e-16 ***
## sexmale                 1.0632     0.6916   1.537    0.125    
## educationmid            0.2524     0.7092   0.356    0.722    
## educationhigh           0.6495     0.8344   0.778    0.437    
## sexmale:educationmid   -0.4909     0.8073  -0.608    0.543    
## sexmale:educationhigh  -0.1050     0.9741  -0.108    0.914    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 3.885 on 827 degrees of freedom
##   (75 observations deleted due to missingness)
## Multiple R-squared:  0.01098,    Adjusted R-squared:  0.005005 
## F-statistic: 1.837 on 5 and 827 DF,  p-value: 0.1032
# plot marginal means of interactions, no interaction found
sjp.int(fit, type = "emm")
## Warning: No significant interactions found...
# plot marginal means of interactions, including those with p-value up to 1
sjp.int(fit, type = "emm", plevel = 1)

# swap predictors
sjp.int(fit,
        type = "emm",
        plevel = 1,
        swapPredictors = TRUE)

# Predictors for negative impact of care.
# Data from the EUROFAMCARE sample dataset
library(sjmisc)
data(efc)
# create binary response
y <- ifelse(efc$neg_c_7 < median(stats::na.omit(efc$neg_c_7)), 0, 1)
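# The same median split can be written as a single comparison (a sketch;
# as with ifelse(), missing values in neg_c_7 remain NA):
y <- as.numeric(efc$neg_c_7 >= median(efc$neg_c_7, na.rm = TRUE))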
# create data frame for fitted model
mydf <- data.frame(y = as.factor(y),
                   sex = as.factor(efc$c161sex),
                   barthel = as.numeric(efc$barthtot))
# fit model
fit <- glm(y ~ sex * barthel,
           data = mydf,
           family = binomial(link = "logit"))
# plot interaction, increase p-level sensitivity
sjp.int(fit,
        type = "eff",
        legendLabels = get_labels(efc$c161sex),
        plevel = 0.1)

sjp.int(fit,
        type = "cond",
        legendLabels = get_labels(efc$c161sex),
        plevel = 0.1)
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.

## Not run:
# -------------------------------
# Plot effects
# -------------------------------
# add continuous variable (note: this re-uses the data frame from the
# estimated marginal means example above, i.e. burden, sex and education)
mydf$barthel <- efc$barthtot
# re-fit model with continuous variable
fit <- lm(burden ~ .*., data = mydf)
# plot effects
sjp.int(fit, type = "eff", showCI = TRUE)
# plot effects, faceted
sjp.int(fit,
        type = "eff",
        int.plot.index = 3,
        showCI = TRUE,
        facet.grid = TRUE)
## End(Not run)

sjp.likert

#Examples
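# The items below are drawn with sample() and therefore change between runs;
# to make the example reproducible you could set a seed first (my addition,
# not part of the original example):
set.seed(123)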
# prepare data for dichotomous likert scale, 5 items
likert_2 <- data.frame(
  as.factor(sample(1:2, 500, replace = TRUE, prob = c(0.3,0.7))),
  as.factor(sample(1:2, 500, replace = TRUE, prob = c(0.6,0.4))),
  as.factor(sample(1:2, 500, replace = TRUE, prob = c(0.25,0.75))),
  as.factor(sample(1:2, 500, replace = TRUE, prob = c(0.9,0.1))),
  as.factor(sample(1:2, 500, replace = TRUE, prob = c(0.35,0.65))))
# create labels
levels_2 <- c("Agree", "Disagree")
# prepare data for 4-category likert scale with neutral category, 5 items
Q1 <- as.factor(sample(1:4, 500, replace = TRUE, prob = c(0.2, 0.3, 0.1, 0.4)))
Q2 <- as.factor(sample(1:4, 500, replace = TRUE, prob = c(0.5, 0.25, 0.15, 0.1)))
Q3 <- as.factor(sample(1:4, 500, replace = TRUE, prob = c(0.25, 0.1, 0.4, 0.25)))
Q4 <- as.factor(sample(1:4, 500, replace = TRUE, prob = c(0.1, 0.4, 0.4, 0.1)))
Q5 <- as.factor(sample(1:4, 500, replace = TRUE, prob = c(0.35, 0.25, 0.15, 0.25)))
likert_4 <- data.frame(Q1, Q2, Q3, Q4, Q5)
# create labels
levels_4 <- c("Strongly agree",
              "Agree",
              "Disagree",
              "Strongly Disagree",
              "Don't know")
# prepare data for 6-category likert scale, 5 items
Q1 <- as.factor(sample(1:6, 500, replace = TRUE, prob = c(0.2,0.1,0.1,0.3,0.2,0.1)))
Q2 <- as.factor(sample(1:6, 500, replace = TRUE, prob = c(0.15,0.15,0.3,0.1,0.1,0.2)))
Q3 <- as.factor(sample(1:6, 500, replace = TRUE, prob = c(0.2,0.25,0.05,0.2,0.2,0.2)))
Q4 <- as.factor(sample(1:6, 500, replace = TRUE, prob = c(0.2,0.1,0.1,0.4,0.1,0.1)))
Q5 <- as.factor(sample(1:6, 500, replace = TRUE, prob = c(0.1,0.4,0.1,0.3,0.05,0.15)))
likert_6 <- data.frame(Q1, Q2, Q3, Q4, Q5)
# create labels
levels_6 <- c("Very strongly agree", "Strongly agree", "Agree",
              "Disagree", "Strongly disagree", "Very strongly disagree")
# create item labels
items <- c("Q1", "Q2", "Q3", "Q4", "Q5")
# plot dichotomous likert scale, ordered by "negative" values
sjp.likert(likert_2,
           geom.colors = c("green", "red"),
           legendLabels = levels_2,
           axisLabels.y = items,
           sort.frq = "neg.desc")
## Warning: Stacking not well defined when ymin != 0

# plot 4-category-likert-scale, no order
sjp.likert(likert_4,
           cat.neutral = 5,
           legendLabels = levels_4,
           axisLabels.y = items,
           gridRange = 1.2,
           expand.grid = FALSE,
           value.labels = "sum.outside",
           showPercentageSign = TRUE)
## Warning in max(catcount): no finite values among the arguments to max; returning -Inf
## Warning in min(catcount): no finite values among the arguments to min; returning Inf
## Warning: Length of labels for item categories 'legendLabels' differs from
## detected amount of categories. Use 'catcount' argument to define amount of
## item categories, if plotting does not work.
## Warning: Stacking not well defined when ymin != 0

# plot 6-category-likert-scale, ordered by positive values,
# in brown color scale
sjp.likert(likert_6,
           legendLabels = levels_6,
           axisLabels.y = items,
           sort.frq = "pos.asc",
           labelDigits = 0,
           showPercentageSign = TRUE,
           value.labels = "sum.inside")
## Warning: Stacking not well defined when ymin != 0

sjp.lm

#Examples
# --------------------------------------------------
# plotting estimates of linear models as forest plot
# --------------------------------------------------
# fit linear model
fit <- lm(airquality$Ozone ~ airquality$Wind + airquality$Temp + 
            airquality$Solar.R)
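# The same model could also be fit in the usual formula/data form, e.g.
# lm(Ozone ~ Wind + Temp + Solar.R, data = airquality); it is kept in the
# "$"-notation here so the example stays as in the original documentation.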
# plot estimates with CI
sjp.lm(fit, gridBreaksAt = 2)
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.

# plot estimates with CI
# and with narrower tick marks
# (because "gridBreaksAt" was not specified)
sjp.lm(fit)
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.

# ---------------------------------------------------
# plotting regression line of linear model (done
# automatically if fitted model has only 1 predictor)
# ---------------------------------------------------
library(sjmisc)
data(efc)
# fit model
fit <- lm(neg_c_7 ~ quol_5, data=efc)
# plot regression line with label strings
sjp.lm(fit,
       axisLabels.x = "Quality of life",
       axisLabels.y = "Burden of care",
       showLoess = TRUE)

# --------------------------------------------------
# plotting regression lines of each single predictor
# of a fitted model
# --------------------------------------------------
library(sjmisc)
data(efc)
# fit model
fit <- lm(tot_sc_e ~ c12hour + e17age + e42dep, data=efc)
# regression line and scatter plot
sjp.lm(fit, type = "pred")

# regression line w/o scatter plot
sjp.lm(fit,
       type = "pred",
       showScatterPlot = FALSE)

# --------------------------
# plotting model assumptions
# --------------------------
sjp.lm(fit, type = "ma")
## Not run:

sjp.poly

# --------------------------
# plotting polynomial terms
# --------------------------
library(sjmisc)
data(efc)
# fit sample model
fit <- lm(tot_sc_e ~ c12hour + e17age + e42dep, data = efc)
# "e17age" does not seem to be linear correlated to response
# try to find appropiate polynomial. Grey line (loess smoothed)
# indicates best fit. Looks like x^3 has a good fit.
# (not checked for significance yet).
sjp.poly(fit, "e17age", 2:4, showScatterPlot = FALSE)
## Polynomial degrees: 2
## ---------------------
## p(x^1): 0.734
## p(x^2): 0.721
## 
## Polynomial degrees: 3
## ---------------------
## p(x^1): 0.010
## p(x^2): 0.011
## p(x^3): 0.011
## 
## Polynomial degrees: 4
## ---------------------
## p(x^1): 0.234
## p(x^2): 0.267
## p(x^3): 0.303
## p(x^4): 0.343

# fit new model
fit <- lm(tot_sc_e ~ c12hour + e42dep +
            e17age + I(e17age^2) + I(e17age^3),
          data = efc)
# plot marginal effects of polynomial term
sjp.lm(fit, type = "poly", poly.term = "e17age")

library(splines)
# fit new model with "splines"-package, "bs"
fit <- lm(tot_sc_e ~ c12hour + e42dep + bs(e17age, 3), data = efc)
# plot marginal effects of polynomial term, same call as above
sjp.lm(fit, type = "poly", poly.term = "e17age")
## NOTE: e17age does not appear in the model
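# The note above presumably appears because bs() creates its own basis
# columns (named "bs(e17age, 3)1" etc.), so no term named "e17age" is left
# in the model; the I(e17age^k) form used before keeps the original term name.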

## End(Not run)

sjp.lmer

#Examples
# fit model
library(lme4)
fit <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
# simple plot
sjp.lmer(fit)
## Plotting random effects...

# plot fixed effects
sjp.lmer(fit, type = "fe")
## Computing approximate p-values via Wald chi-squared test...

# sort by predictor Days
sjp.lmer(fit, sort.coef = "Days")
## Plotting random effects...

# plot each predictor as own plot
# sort each plot
sjp.lmer(fit,
         facet.grid = FALSE,
         sort.coef = "sort.all")

library(sjmisc)
data(efc)
# prepare group variable
efc$grp = as.factor(efc$e15relat)
levels(x = efc$grp) <- get_labels(efc$e15relat)
# data frame for fitted model
mydf <- data.frame(neg_c_7 = as.numeric(efc$neg_c_7),
                   sex = as.factor(efc$c161sex),
                   c12hour = as.numeric(efc$c12hour),
                   barthel = as.numeric(efc$barthtot),
                   grp = efc$grp)
# fit lmer
fit <- lmer(neg_c_7 ~ sex + c12hour + barthel + (1|grp),
            data = mydf)
# plot fixed effects
sjp.lmer(fit, type = "fe")
## Computing approximate p-values via Wald chi-squared test...

sjp.lmer(fit,
         type = "fe.std",
         sort.coef = TRUE)
## Computing approximate p-values via Wald chi-squared test...

# plot fixed effects slopes for
# each random intercept, but only for
# coefficient "c12hour"
sjp.lmer(fit,
         type = "fe.ri",
         vars = "c12hour")

# highlight specific grouping levels
# in this case we compare spouses, children
# and children-in-law
sjp.lmer(fit,
         type = "fe.ri",
         emph.grp = c(1, 2, 4),
         vars = "c12hour")
## Emphasizing groups only works in non-faceted plots. Use 'facet.grid = FALSE' to enable group emphasizing. 'emph.grp' was set to NULL.
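
# As the message above suggests, emphasizing groups requires a non-faceted
# plot; the same call with faceting turned off (a sketch):
sjp.lmer(fit,
         type = "fe.ri",
         facet.grid = FALSE,
         emph.grp = c(1, 2, 4),
         vars = "c12hour")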

## Not run:
# --------------------------
# plotting polynomial terms
# --------------------------
# check linear relation between predictors and response
sjp.lmer(fit, type = "fe.pred")

# "barthel" does not seem to be linear correlated to response
# try to find appropiate polynomial. Grey line (loess smoothed)
# indicates best fit. Looks like x^4 has the best fit,
# however, x^2 seems to be suitable according to p-values.
sjp.poly(fit, "barthel", 2:4, showScatterPlot = FALSE)
## Polynomial degrees: 2
## ---------------------
## p(x^1): 0.178
## p(x^2): 0.000
## 
## Polynomial degrees: 3
## ---------------------
## p(x^1): 0.107
## p(x^2): 0.053
## p(x^3): 0.240
## 
## Polynomial degrees: 4
## ---------------------
## p(x^1): 0.819
## p(x^2): 0.530
## p(x^3): 0.280
## p(x^4): 0.216

# fit new model
fit <- lmer(neg_c_7 ~ sex + c12hour + barthel +
              I(barthel^2) + (1|grp), data = mydf)
## Warning: Some predictor variables are on very different scales: consider
## rescaling
# plot marginal effects of polynomial term
sjp.lmer(fit, type = "poly", poly.term = "barthel")

# lme4 complains about the scale of the polynomial term, so
# try centering and scaling this predictor
mydf$barthel_s <- scale(mydf$barthel, center = TRUE, scale = TRUE)
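# note (my addition): scale() returns a one-column matrix; wrapping it in
# as.numeric() would keep barthel_s as a plain numeric vector, e.g.
# mydf$barthel_s <- as.numeric(scale(mydf$barthel))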
# re-fit model
fit_s <- lmer(neg_c_7 ~ sex + c12hour + barthel_s +
                I(barthel_s^2) + (1|grp), data = mydf)
# plot marginal effects of centered, scaled polynomial term
sjp.lmer(fit_s, type = "poly", poly.term = "barthel_s")

# scaling also improved p-values
sjt.lmer(fit, fit_s)
## Computing approximate p-values via Wald chi-squared test...
## Computing approximate p-values via Wald chi-squared test...
## End(Not run)

sjp.lmm

#Examples
# Now fit the models. Note that all models share the same predictors
# and differ only in their dependent variable.
library(sjmisc)
data(efc)
# fit three models
fit1 <- lm(barthtot ~ c160age + c12hour + c161sex + c172code, data = efc)
fit2 <- lm(neg_c_7 ~ c160age + c12hour + c161sex + c172code, data = efc)
fit3 <- lm(tot_sc_e ~ c160age + c12hour + c161sex + c172code, data = efc)
# plot multiple models
sjp.lmm(fit1, fit2, fit3, facet.grid = TRUE)
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.
## Warning: position_dodge requires non-overlapping x intervals
## (warning repeated six times)

# plot multiple models with legend labels and
# point shapes instead of value labels
sjp.lmm(fit1, fit2, fit3,
        axisLabels.y = c("Carer's Age",
                         "Hours of Care",
                         "Carer's Sex",
                         "Educational Status"),
        labelDependentVariables = c("Barthel Index",
                                    "Negative Impact",
                                    "Services used"),
        showValueLabels = FALSE,
        showPValueLabels = FALSE,
        fade.ns = TRUE,
        usePShapes = TRUE)
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.
## Warning: position_dodge requires non-overlapping x intervals
## Warning: position_dodge requires non-overlapping x intervals

# ------------------------------
# plot multiple models from a list argument
# ------------------------------
all.models <- list()
all.models[[1]] <- fit1
all.models[[2]] <- fit2
all.models[[3]] <- fit3
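# (equivalently, the list could be built in one step:
#  all.models <- list(fit1, fit2, fit3))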
sjp.lmm(all.models)
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.
## Warning: position_dodge requires non-overlapping x intervals
## Warning: position_dodge requires non-overlapping x intervals

# ------------------------------
# plot multiple models with different
# predictors (stepwise inclusion),
# standardized estimates
# ------------------------------
fit1 <- lm(mpg ~ wt + cyl + disp + gear, data = mtcars)
fit2 <- update(fit1, . ~ . + hp)
fit3 <- update(fit2, . ~ . + am)
sjp.lmm(fit1, fit2, fit3, type = "std2")
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.
## Warning: position_dodge requires non-overlapping x intervals
## Warning: position_dodge requires non-overlapping x intervals
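
# "std2" shows standardized estimates; as far as I recall the sjPlot
# documentation, standardization here follows Gelman's suggestion of dividing
# estimates by two standard deviations, which makes binary and continuous
# predictors roughly comparable.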

sjp.pca

#Examples
# randomly create data frame with 7 items, each consisting of 4 categories
likert_4 <- data.frame(sample(1:4,
                              500,
                              replace = TRUE,
                              prob = c(0.2, 0.3, 0.1, 0.4)),
                       sample(1:4,
                              500,
                              replace = TRUE,
                              prob = c(0.5, 0.25, 0.15, 0.1)),
                       sample(1:4,
                              500,
                              replace = TRUE,
                              prob = c(0.4, 0.15, 0.25, 0.2)),
                       sample(1:4,
                              500,
                              replace = TRUE,
                              prob = c(0.25, 0.1, 0.4, 0.25)),
                       sample(1:4,
                              500,
                              replace = TRUE,
                              prob = c(0.1, 0.4, 0.4, 0.1)),
                       sample(1:4,
                              500,
                              replace = TRUE),
                       sample(1:4,
                              500,
                              replace = TRUE,
                              prob = c(0.35, 0.25, 0.15, 0.25)))
# Create variable labels
colnames(likert_4) <- c("V1", "V2", "V3", "V4", "V5", "V6", "V7")
# plot results from PCA as square-tiled "heatmap"
sjp.pca(likert_4, type = "tile")
# plot results from PCA as bars
sjp.pca(likert_4, type = "bar")
# manually compute PCA
pca <- prcomp(na.omit(likert_4),
              retx = TRUE,
              center = TRUE,
              scale. = TRUE)
# plot results from PCA as circles, including eigenvalue diagnostics.
# Note that this plot does not compute Cronbach's alpha.
sjp.pca(pca,
        plotEigenvalues = TRUE,
        type = "circle",
        geom.size = 10)
# -------------------------------
# Data from the EUROFAMCARE sample dataset
# -------------------------------
library(sjmisc)
data(efc)
# retrieve variable labels
varlabs <- get_label(efc)
# retrieve index of the first item of the COPE-index scale
start <- which(colnames(efc) == "c82cop1")
# retrieve index of the last item of the COPE-index scale
end <- which(colnames(efc) == "c90cop9")
# create data frame with COPE-index scale
mydf <- data.frame(efc[, c(start:end)])
colnames(mydf) <- varlabs[c(start:end)]
sjp.pca(mydf)
## Following items have been removed:
## none.

sjp.pca(mydf, type = "tile")
## Following items have been removed:
## none.

# -------------------------------
# auto-detection of labels
# -------------------------------
sjp.pca(efc[, c(start:end)], type = "circle", geom.size = 10)
## Following items have been removed:
## none.

sjp.poly

library(sjmisc)
data(efc)
# linear fit; the loess-smoothed line indicates a more
# or less cubic curve
sjp.poly(efc$c160age, efc$quol_5, 1)
## Polynomial degrees: 1
## ---------------------
## p(x^1): 0.000

# quadratic fit
sjp.poly(efc$c160age, efc$quol_5, 2)
## Polynomial degrees: 2
## ---------------------
## p(x^1): 0.078
## p(x^2): 0.533

# linear to cubic fit
sjp.poly(efc$c160age, efc$quol_5,
         1:4, showScatterPlot = FALSE)
## Polynomial degrees: 1
## ---------------------
## p(x^1): 0.000
## 
## Polynomial degrees: 2
## ---------------------
## p(x^1): 0.078
## p(x^2): 0.533
## 
## Polynomial degrees: 3
## ---------------------
## p(x^1): 0.012
## p(x^2): 0.001
## p(x^3): 0.000
## 
## Polynomial degrees: 4
## ---------------------
## p(x^1): 0.777
## p(x^2): 0.913
## p(x^3): 0.505
## p(x^4): 0.254

library(sjmisc)
data(efc)
# fit sample model
fit <- lm(tot_sc_e ~ c12hour + e17age + e42dep, data = efc)
# inspect relationship between predictors and response
sjp.lm(fit, type = "pred",
       showLoess = TRUE, showScatterPlot = FALSE)

## Warning in simpleLoess(y, x, w, span, degree, parametric, drop.square,
## normalize, : pseudoinverse used at 4.015
## Warning in simpleLoess(y, x, w, span, degree, parametric, drop.square,
## normalize, : neighborhood radius 2.015
## Warning in simpleLoess(y, x, w, span, degree, parametric, drop.square,
## normalize, : reciprocal condition number 3.6014e-017
## Warning in simpleLoess(y, x, w, span, degree, parametric, drop.square,
## normalize, : There are other near singularities as well. 1

# "e17age" does not seem to be linear correlated to response
# try to find appropiate polynomial. Grey line (loess smoothed)
# indicates best fit. Looks like x^4 has the best fit,
# however, only x^3 has significant p-values.
sjp.poly(fit, "e17age", 2:4, showScatterPlot = FALSE)
## Polynomial degrees: 2
## ---------------------
## p(x^1): 0.734
## p(x^2): 0.721
## 
## Polynomial degrees: 3
## ---------------------
## p(x^1): 0.010
## p(x^2): 0.011
## p(x^3): 0.011
## 
## Polynomial degrees: 4
## ---------------------
## p(x^1): 0.234
## p(x^2): 0.267
## p(x^3): 0.303
## p(x^4): 0.343

## Not run:
# fit new model
fit <- lm(tot_sc_e ~ c12hour + e42dep +
            e17age + I(e17age^2) + I(e17age^3),
          data = efc)
# plot marginal effects of polynomial term
sjp.lm(fit, type = "poly", poly.term = "e17age")

## End(Not run)

sjp.scatter

#Examples
# load sample data
library(sjmisc)
data(efc)
# simple scatter plot, auto-jittering
sjp.scatter(efc$e16sex, efc$neg_c_7)
## auto-jittering values...

# simple scatter plot, no jittering needed
sjp.scatter(efc$c160age, efc$e17age)

# grouped scatter plot
sjp.scatter(efc$c160age, efc$e17age, efc$e42dep)

# grouped and jittered scatter plot with marginal rug plot
sjp.scatter(efc$e16sex,efc$neg_c_7, efc$c172code, showRug = TRUE)
## auto-jittering values...

# grouped and labelled scatter plot, not using the auto-detection
# of labels, but passing labels as arguments instead
sjp.scatter(efc$c160age, efc$e17age, efc$e42dep,
            title = "Scatter Plot",
            legendTitle = get_label(efc)['e42dep'],
            legendLabels = get_labels(efc)[['e42dep']],
            axisTitle.x = get_label(efc)['c160age'],
            axisTitle.y = get_label(efc)['e17age'],
            showGroupFitLine = TRUE)

# grouped and labelled scatter plot as facets
sjp.scatter(efc$c160age,efc$e17age, efc$e42dep,
            showGroupFitLine = TRUE,
            facet.grid = TRUE,
            show.se = TRUE)

# plot residuals of fitted models
fit <- lm(neg_c_7 ~ quol_5, data = efc)
sjp.scatter(y = fit$residuals, showTotalFitLine = TRUE)

# "hide" axis titles
sjp.scatter(efc$c160age, efc$e17age, efc$e42dep,
            title = "", axisTitle.x = "", axisTitle.y = "")

# plot text labels
pl <- character(10)
for (i in 1:10) pl[i] <- paste(sample(c(0:9, letters, LETTERS),
                                      8, replace = TRUE),
                               collapse = "")
sjp.scatter(runif(10), runif(10), pointLabels = pl)

sjp.setTheme

#Examples
## Not run:
library(sjmisc)
data(efc)
# set sjPlot defaults, a slight modification
# of the ggplot base theme
sjp.setTheme()
# place legends of all following plots inside the plot area, top left
sjp.setTheme(legend.pos = "top left",
             legend.inside = TRUE)
sjp.xtab(efc$e42dep, efc$e16sex)
# Use the classic theme. You may need to
# load the ggplot2 library.
library(ggplot2)
sjp.setTheme(theme = theme_classic())
sjp.frq(efc$e42dep)
# adjust value labels
sjp.setTheme(geom.label.size = 3.5,
             geom.label.color = "#3366cc",
             geom.label.angle = 90)
# hjust-aes needs adjustment for this
update_geom_defaults('text', list(hjust = -0.1))
sjp.xtab(efc$e42dep,
         efc$e16sex,
         labelPos = "center")
# Create your own theme based on the classic theme
sjp.setTheme(base = theme_classic(),
             axis.linecolor = "grey50",
             axis.textcolor = "#6699cc")
sjp.frq(efc$e42dep)
# use a pre-set theme
sjp.setTheme(theme = "538",
             geom.alpha = 0.8)
library(ggplot2) # for custom base-line
sjp.frq(efc$e42dep,
        geom.color = "#c0392b",
        expand.grid = TRUE,
        printPlot = FALSE)$plot +
  geom_hline(yintercept = 0,
             size = 0.5,
             colour = "black")
## End(Not run)

sjp.stackfrq

#Examples
# -------------------------------
# random sample
# -------------------------------
# prepare data for 4-category likert scale, 5 items
Q1 <- as.factor(sample(1:4, 500, replace = TRUE,
                       prob = c(0.2, 0.3, 0.1, 0.4)))
Q2 <- as.factor(sample(1:4, 500, replace = TRUE,
                       prob = c(0.5, 0.25, 0.15, 0.1)))
Q3 <- as.factor(sample(1:4, 500, replace = TRUE,
                       prob = c(0.25, 0.1, 0.4, 0.25)))
Q4 <- as.factor(sample(1:4, 500, replace = TRUE,
                       prob = c(0.1, 0.4, 0.4, 0.1)))
Q5 <- as.factor(sample(1:4, 500, replace = TRUE,
                       prob = c(0.35, 0.25, 0.15, 0.25)))
likert_4 <- data.frame(Q1, Q2, Q3, Q4, Q5)
# create labels
levels_4 <- c("Independent",
              "Slightly dependent",
              "Dependent",
              "Severely dependent")
# plot stacked frequencies of 5 (ordered) item-scales
sjp.stackfrq(likert_4, legendLabels = levels_4)

# -------------------------------
# Data from the EUROFAMCARE sample dataset
# -------------------------------
library(sjmisc)
data(efc)
# retrieve index of the first item of the COPE-index scale
start <- which(colnames(efc) == "c82cop1")
# retrieve index of the last item of the COPE-index scale
end <- which(colnames(efc) == "c90cop9")
# retrieve variable and value labels
varlabs <- get_label(efc)
vallabs <- get_labels(efc)
# create value labels. We only need one variable of
# the COPE-index scale because all items share the same
# level / category / value labels
levels <- vallabs['c82cop1']
# create item labels
items <- varlabs[c(start:end)]
sjp.stackfrq(efc[, c(start:end)],
             legendLabels = levels,
             axisLabels.y = items,
             jitterValueLabels = TRUE)

# -------------------------------
# auto-detection of labels
# -------------------------------
sjp.stackfrq(efc[, c(start:end)])

sjp.xtab

#Examples
# create 4-category-items
grp <- sample(1:4, 100, replace = TRUE)
# create 3-category-items
x <- sample(1:3, 100, replace = TRUE)
# plot "cross tablulation" of x and grp
sjp.xtab(x, grp)
# plot "cross tablulation" of x and y, including labels
sjp.xtab(x, grp,
         axisLabels.x = c("low", "mid", "high"),
         legendLabels = c("Grp 1", "Grp 2", "Grp 3", "Grp 4"))
# plot "cross tablulation" of x and grp
# as stacked proportional bars
sjp.xtab(x, grp,
         tableIndex = "row",
         barPosition = "stack",
         showTableSummary = TRUE,
         coord.flip = TRUE)
# example with vertical labels
library(sjmisc)
data(efc)
sjp.setTheme(geom.label.angle = 90)
# hjust-aes needs adjustment for this
library(ggplot2)
update_geom_defaults('text', list(hjust = -0.1))
sjp.xtab(efc$e42dep,
         efc$e16sex,
         labelPos = "center")
# grouped bars with the EUROFAMCARE sample dataset;
# the dataset was imported from an SPSS file,
# see ?sjmisc::read_spss
data(efc)
efc.val <- get_labels(efc)
efc.var <- get_label(efc)
sjp.xtab(efc$e42dep,
         efc$e16sex,
         title = efc.var['e42dep'],
         axisLabels.x = efc.val[['e42dep']],
         legendTitle = efc.var['e16sex'],
         legendLabels = efc.val[['e16sex']])
sjp.xtab(efc$e16sex,
         efc$e42dep,
         title = efc.var['e16sex'],
         axisLabels.x = efc.val[['e16sex']],
         legendTitle = efc.var['e42dep'],
         legendLabels = efc.val[['e42dep']])
# -------------------------------
# auto-detection of labels works here,
# so there is no need to specify labels. For
# automatic title detection, set title = NULL
# -------------------------------
sjp.xtab(efc$e16sex, efc$e42dep, title = NULL)
sjp.xtab(efc$e16sex,
         efc$e42dep,
         tableIndex = "row",
         barPosition = "stack",
         coord.flip = TRUE,
         jitterValueLabels = TRUE)

sjt.corr

## Not run:
# create data frame with 5 random variables
mydf <- data.frame(cbind(runif(10),
                         runif(10),
                         runif(10),
                         runif(10),
                         runif(10)))
# print correlation matrix of the random variables as a table
sjt.corr(mydf)
# -------------------------------
# Data from the EUROFAMCARE sample dataset
# -------------------------------
library(sjmisc)
data(efc)
# retrieve variable labels
varlabs <- get_label(efc)
# retrieve index of the first COPE-index item used here
start <- which(colnames(efc) == "c83cop2")
# retrieve index of the last COPE-index item used here
end <- which(colnames(efc) == "c88cop7")
# create data frame with COPE-index scale
mydf <- data.frame(efc[, c(start:end)])
colnames(mydf) <- varlabs[c(start:end)]
# we have high correlations here, because all items
# belong to one factor. See example from "sjp.pca".
sjt.corr(mydf, pvaluesAsNumbers = TRUE)
## Warning in cor.test.default(df[[i]], df[[j]], alternative = "two.sided", :
## Cannot compute exact p-value with ties
## (identical warning repeated for every pair of variables with ties)
# -------------------------------
# auto-detection of labels, only lower triangle
# -------------------------------
sjt.corr(efc[, c(start:end)], triangle = "lower")
## Warning in cor.test.default(df[[i]], df[[j]], alternative = "two.sided", :
## Cannot compute exact p-value with ties
## (identical warning repeated for every pair of variables with ties)
# -------------------------------
# auto-detection of labels, only lower triangle,
# all correlation values smaller than 0.3 are not
# shown in the table
# -------------------------------
sjt.corr(efc[, c(start:end)],
         triangle = "lower",
         val.rm = 0.3)
## Warning in cor.test.default(df[[i]], df[[j]], alternative = "two.sided", :
## Cannot compute exact p-value with ties
## (identical warning repeated for every pair of variables with ties)
# -------------------------------
# auto-detection of labels, only lower triangle,
# all correlation values smaller than 0.3 are printed
# in blue
# -------------------------------
sjt.corr(efc[, c(start:end)],
         triangle = "lower",
         val.rm = 0.3,
         CSS = list(css.valueremove = 'color:blue;'))
## Warning in cor.test.default(df[[i]], df[[j]], alternative = "two.sided", :
## Cannot compute exact p-value with ties
## (identical warning repeated for every pair of variables with ties)
## End(Not run)

sjt.df

#Examples
## Not run:
# init dataset
library(sjmisc)
data(efc)
# plot efc-data frame summary
sjt.df(efc, alternateRowColors = TRUE)
## (text progress bar output omitted)
# plot content, first 50 rows of first 5 columns of example data set
sjt.df(efc[1:50, 1:5],
       describe = FALSE,
       stringVariable = "Observation")
## (text progress bar output omitted)
# plot summary of the efc data frame, sorted descending by the mean column
sjt.df(efc,
       orderColumn = "mean",
       orderAscending = FALSE)
# plot first 20 rows of the first 5 columns of the example data set,
# sorted by column "e42dep", with alternating row colors
sjt.df(efc[1:20, 1:5],
       alternateRowColors = TRUE,
       orderColumn = "e42dep",
       describe = FALSE)
# plot first 20 rows of first 5 columns of example data set,
# sorted by 4th column in descending order.
sjt.df(efc[1:20, 1:5],
       orderColumn = 4,
       orderAscending = FALSE,
       describe = FALSE)
# add a big mark (thousands separator)
library(datasets)
sjt.df(as.data.frame(WorldPhones), big.mark = ",")
# ----------------------------------------------------------------
# User defined style sheet
# ----------------------------------------------------------------
sjt.df(efc,
       alternateRowColors = TRUE,
       CSS = list(css.table = "border: 2px solid #999999;",
                  css.tdata = "border-top: 1px solid;",
                  css.arc = "color:blue;"))
## End(Not run)

sjt.frq

#Examples
## Not run:
# load sample data
library(sjmisc)
data(efc)
# show frequencies of "e42dep" in RStudio Viewer Pane
# or default web browser
sjt.frq(efc$e42dep)
# plot and show frequency table of "e42dep" with labels
sjt.frq(efc$e42dep,
        variableLabels = "Dependency",
        valueLabels = c("independent",
                        "slightly dependent",
                        "moderately dependent",
                        "severely dependent"))
# plot frequencies of e42dep, e16sex and c172code in one HTML file
# and show table in RStudio Viewer Pane or default web browser
# Note that valueLabels for multiple variables have to be
# provided as a list
sjt.frq(data.frame(efc$e42dep, efc$e16sex, efc$c172code),
        variableLabels = c("Dependency",
                           "Gender",
                           "Education"),
        valueLabels = list(c("independent",
                             "slightly dependent",
                             "moderately dependent",
                             "severely dependent"),
                           c("male", "female"),
                           c("low", "mid", "high")))
# -------------------------------
# auto-detection of labels
# due to auto-detection of labels, this works as well
# -------------------------------
sjt.frq(data.frame(efc$e42dep, efc$e16sex, efc$c172code))
# plot a larger scale including zero-counts,
# highlighting median and quartiles
sjt.frq(efc$neg_c_7,
        highlightMedian = TRUE,
        highlightQuartiles = TRUE)
# -------------------------------
# sort frequencies
# -------------------------------
sjt.frq(efc$e42dep, sort.frq = "desc")
## Sorting may not work when data contains values with zero-counts.
# --------------------------------
# User defined style sheet
# --------------------------------
sjt.frq(efc$e42dep,
        CSS = list(css.table = "border: 2px solid;",
                   css.tdata = "border: 1px solid;",
                   css.firsttablecol = "color:#003399; font-weight:bold;"))
## End(Not run)

sjt.glm

# prepare dummy variables for binary logistic regression
y1 <- ifelse(swiss$Fertility < median(swiss$Fertility), 0, 1)
y2 <- ifelse(swiss$Infant.Mortality < median(swiss$Infant.Mortality), 0, 1)
y3 <- ifelse(swiss$Agriculture < median(swiss$Agriculture), 0, 1)
# Now fit the models. Note that all three models share the same predictors
# and only differ in their dependent variables (y1, y2 and y3)
fitOR1 <- glm(y1 ~ swiss$Education + swiss$Examination+swiss$Catholic,
              family = binomial(link = "logit"))
fitOR2 <- glm(y2 ~ swiss$Education + swiss$Examination+swiss$Catholic,
              family = binomial(link = "logit"))
fitOR3 <- glm(y3 ~ swiss$Education + swiss$Examination+swiss$Catholic,
              family = binomial(link = "logit"))
# open HTML-table in RStudio Viewer Pane or web browser
## Not run:
sjt.glm(fitOR1,
        fitOR2,
        labelDependentVariables = c("Fertility",
                                    "Infant Mortality"),
        labelPredictors = c("Education",
                            "Examination",
                            "Catholic"),
        ci.hyphen = " to ")
## Waiting for profiling to be done...
## Waiting for profiling to be done...
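# the "Waiting for profiling to be done..." messages stem from the
# profile-likelihood confidence intervals computed for each model;
# the same messages appear when calling base R's confint() on a glm
# directly (a sketch, not part of the original example)
confint(fitOR1)        # CIs on the log-odds scale
exp(confint(fitOR1))   # exponentiated, i.e. as odds ratios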
# open HTML-table in RStudio Viewer Pane or web browser,
# integrate CI in OR column
sjt.glm(fitOR1, fitOR2, fitOR3,
        labelDependentVariables = c("Fertility",
                                    "Infant Mortality",
                                    "Agriculture"),
        labelPredictors = c("Education", "Examination", "Catholic"),
        separateConfColumn = FALSE)
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred
# open HTML-table in RStudio Viewer Pane or web browser,
# indicating p-values as numbers and printing CI in a separate column
sjt.glm(fitOR1, fitOR2, fitOR3,
        labelDependentVariables = c("Fertility",
                                    "Infant Mortality",
                                    "Agriculture"),
        labelPredictors = c("Education", "Examination", "Catholic"))
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred
# --------------------------------------------
# User defined style sheet
# --------------------------------------------
sjt.glm(fitOR1, fitOR2, fitOR3,
        labelDependentVariables = c("Fertility",
                                    "Infant Mortality",
                                    "Agriculture"),
        labelPredictors = c("Education", "Examination", "Catholic"),
        showHeaderStrings = TRUE,
        CSS = list(css.table = "border: 2px solid;",
                   css.tdata = "border: 1px solid;",
                   css.depvarhead = "color:#003399;"))
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred
# --------------------------------------------
# Compare models with different link functions,
# but same predictors and response
# --------------------------------------------
library(sjmisc)
# load efc sample data
data(efc)
# dichotomize service usage into "service usage yes/no"
efc$services <- sjmisc::dicho(efc$tot_sc_e, "v", 0, as.num = TRUE)
# fit 3 models with different link-functions
fit1 <- glm(services ~ neg_c_7 + c161sex + e42dep,
            data=efc,
            family=binomial(link="logit"))
fit2 <- glm(services ~ neg_c_7 + c161sex + e42dep,
            data=efc,
            family=binomial(link="probit"))
fit3 <- glm(services ~ neg_c_7 + c161sex + e42dep,
            data=efc,
            family=poisson(link="log"))
# compare models
sjt.glm(fit1, fit2, fit3,
        showAIC = TRUE,
        showFamily = TRUE)
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Waiting for profiling to be done...
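# the models' AIC values (cf. 'showAIC') can also be listed directly
# with base R for a quick comparison (a sketch, not part of the
# original example)
AIC(fit1, fit2, fit3)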
# --------------------------------------------
# Change style of p-values and CI-appearance
# --------------------------------------------
# open HTML-table in RStudio Viewer Pane or web browser,
# table indicating p-values as stars
sjt.glm(fit1, fit2, fit3,
        pvaluesAsNumbers = FALSE,
        showAIC = TRUE,
        showFamily = TRUE)
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Waiting for profiling to be done...
# open HTML-table in RStudio Viewer Pane or web browser,
# indicating p-values as stars and integrate CI in OR column
sjt.glm(fit1, fit2, fit3,
        pvaluesAsNumbers = FALSE,
        separateConfColumn = FALSE,
        showAIC = TRUE,
        showFamily = TRUE,
        showPseudoR = TRUE)
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Waiting for profiling to be done...
# ----------------------------------
# automatic grouping of predictors
# ----------------------------------
library(sjmisc)
# load efc sample data
data(efc)
# dichotomize service usage into "service usage yes/no"
efc$services <- sjmisc::dicho(efc$tot_sc_e, "v", 0, as.num = TRUE)
# make dependency categorical
efc$e42dep <- to_factor(efc$e42dep)
# fit model with "grouped" predictor
fit <- glm(services ~ neg_c_7 + c161sex + e42dep, data = efc)
# automatic grouping of categorical predictors
sjt.glm(fit)
## Waiting for profiling to be done...
# ----------------------------------
# compare models with different predictors
# ----------------------------------
fit2 <- glm(services ~ neg_c_7 + c161sex + e42dep + c12hour, data = efc)
fit3 <- glm(services ~ neg_c_7 + c161sex + e42dep + c12hour + c172code,
            data = efc)
# print models with different predictors
sjt.glm(fit, fit2, fit3)
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Fitted models have different coefficients. Grouping may not work properly. Set 'group.pred = FALSE' if you encouter cluttered labelling.
efc$c172code <- to_factor(efc$c172code)
fit2 <- glm(services ~ neg_c_7 + c161sex + c12hour, data = efc)
fit3 <- glm(services ~ neg_c_7 + c161sex + c172code, data = efc)
# print models with different predictors
sjt.glm(fit, fit2, fit3, group.pred = FALSE)
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## Waiting for profiling to be done...
## End(Not run)

sjt.glmer

#Examples
## Not run:
library(lme4)
library(sjmisc)
data(efc)
# create binary response
efc$hi_qol <- dicho(efc$quol_5)
# prepare group variable
efc$grp = as.factor(efc$e15relat)
levels(x = efc$grp) <- get_labels(efc$e15relat)
# data frame for fitted model
mydf <- data.frame(hi_qol = as.factor(efc$hi_qol),
                   sex = as.factor(efc$c161sex),
                   c12hour = as.numeric(efc$c12hour),
                   neg_c_7 = as.numeric(efc$neg_c_7),
                   education = as.factor(efc$c172code),
                   grp = efc$grp)
# fit glmer
fit1 <- glmer(hi_qol ~ sex + c12hour + neg_c_7 + (1|grp),
              data = mydf,
              family = binomial("logit"))
fit2 <- glmer(hi_qol ~ sex + c12hour + neg_c_7 + education + (1|grp),
              data = mydf,
              family = binomial("logit"))
# print summary table
sjt.glmer(fit1, fit2,
          ci.hyphen = " to ")
# print summary table, using different table layout
sjt.glmer(fit1, fit2,
          showAIC = TRUE,
          showConfInt = FALSE,
          showStdError = TRUE,
          pvaluesAsNumbers = FALSE)
# print summary table with custom predictor labels
sjt.glmer(fit1, fit2,
          labelPredictors = c("Elder's gender (female)",
                              "Hours of care per week",
                              "Negative Impact",
                              "Educational level (mid)",
                              "Educational level (high)"))
## End(Not run)

sjt.grpmean

#Examples
## Not run:
library(sjmisc)
data(efc)
sjt.grpmean(efc$c12hour,
            efc$e42dep)
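# the group means shown in the table can be reproduced with base R,
# e.g. via aggregate() (a sketch, not part of the original example)
aggregate(c12hour ~ e42dep, data = efc, FUN = mean)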
## End(Not run)

sjt.itemanalysis

#Examples
# -------------------------------
# Data from the EUROFAMCARE sample dataset
# -------------------------------
library(sjmisc)
data(efc)
# retrieve variable and value labels
varlabs <- get_label(efc)
# retrieve first item of COPE-index scale
start <- which(colnames(efc) == "c82cop1")
# retrieve last item of COPE-index scale
end <- which(colnames(efc) == "c90cop9")
# create data frame with COPE-index scale
mydf <- data.frame(efc[, c(start:end)])
colnames(mydf) <- varlabs[c(start:end)]
## Not run:
sjt.itemanalysis(mydf)
# -------------------------------
# auto-detection of labels
# -------------------------------
sjt.itemanalysis(efc[, c(start:end)])
# ---------------------------------------
# Compute PCA on the COPE-index and perform an
# item analysis for each extracted factor.
# ---------------------------------------
factor.groups <- sjt.pca(mydf, no.output = TRUE)$factor.index
sjt.itemanalysis(mydf, factor.groups)
## Warning in cor.test.default(df[[i]], df[[j]], alternative = "two.sided", :
## Cannot compute exact p-value with ties
## Warning in cor.test.default(df[[i]], df[[j]], alternative = "two.sided", :
## Cannot compute exact p-value with ties
## Warning in cor.test.default(df[[i]], df[[j]], alternative = "two.sided", :
## Cannot compute exact p-value with ties
## Warning in cor.test.default(df[[i]], df[[j]], alternative = "two.sided", :
## Cannot compute exact p-value with ties
## End(Not run)

sjt.lm

#Examples
## Not run:
# Now fit the models. Note that both models share the same predictors
# and only differ in their dependent variable. See the examples comparing
# models with different predictors at the end.
library(sjmisc)
data(efc)
# fit first model
fit1 <- lm(barthtot ~ c160age + c12hour + c161sex + c172code, 
           data = efc)
# fit second model
fit2 <- lm(neg_c_7 ~ c160age + c12hour + c161sex + c172code, data = efc)
# create and open HTML-table in RStudio Viewer Pane or web browser
# note that we don't need to specify labels for the predictors,
# because these are automatically read
sjt.lm(fit1, fit2)
# create and open HTML-table in RStudio Viewer Pane or web browser
# in the following examples, we set labels via argument
sjt.lm(fit1,
       fit2,
       labelDependentVariables = c("Barthel-Index",
                                   "Negative Impact"),
       labelPredictors = c("Carer's Age",
                           "Hours of Care",
                           "Carer's Sex",
                           "Educational Status"))
# show HTML-table, indicating p-values as asterisks
sjt.lm(fit1,
       fit2,
       labelDependentVariables = c("Barthel-Index",
                                   "Negative Impact"),
       labelPredictors = c("Carer's Age",
                           "Hours of Care",
                           "Carer's Sex",
                           "Educational Status"),
       showStdBeta = TRUE,
       pvaluesAsNumbers = FALSE)
# create and open HTML-table in RStudio Viewer Pane or web browser,
# integrate CI in estimate column
sjt.lm(fit1,
       fit2,
       labelDependentVariables = c("Barthel-Index",
                                   "Negative Impact"),
       labelPredictors = c("Carer's Age",
                           "Hours of Care",
                           "Carer's Sex",
                           "Educational Status"),
       separateConfColumn = FALSE)
# show HTML-table, indicating p-values as numbers
# and printing CI in a separate column
sjt.lm(fit1,
       fit2,
       labelDependentVariables = c("Barthel-Index",
                                   "Negative Impact"),
       labelPredictors = c("Carer's Age",
                           "Hours of Care",
                           "Carer's Sex",
                           "Educational Status"),
       showStdBeta = TRUE)
# show HTML-table, indicating p-values as stars
# and integrate CI in estimate column
sjt.lm(fit1,
       fit2,
       labelDependentVariables = c("Barthel-Index",
                                   "Negative Impact"),
       labelPredictors = c("Carer's Age",
                           "Hours of Care",
                           "Carer's Sex",
                           "Educational Status"),
       showStdBeta = TRUE,
       ci.hyphen = " to ",
       minus.sign = "&minus;",
       pvaluesAsNumbers = FALSE,
       separateConfColumn = FALSE)
# ----------------------------------
# connecting two html-tables
# ----------------------------------
# fit two more models
fit3 <- lm(tot_sc_e ~ c160age + c12hour + c161sex + c172code, data=efc)
fit4 <- lm(e42dep ~ c160age + c12hour + c161sex + c172code, data=efc)
# create and save first HTML-table
part1 <- sjt.lm(fit1,
                fit2,
                labelDependentVariables = c("Barthel-Index",
                                            "Negative Impact"),
                labelPredictors = c("Carer's Age",
                                    "Hours of Care",
                                    "Carer's Sex",
                                    "Educational Status"))
# create and save second HTML-table
part2 <- sjt.lm(fit3,
                fit4,
                labelDependentVariables = c("Service Usage",
                                            "Elder's Dependency"),
                labelPredictors = c("Carer's Age",
                                    "Hours of Care",
                                    "Carer's Sex",
                                    "Educational Status"))
# browse temporary file
htmlFile <- tempfile(fileext=".html")
write(sprintf("<html><head>%s</head><body>%s<p></p>%s</body></html>",
              part1$page.style,
              part1$page.content,
              part2$page.content),
      file = htmlFile)
viewer <- getOption("viewer")
if (!is.null(viewer)) viewer(htmlFile) else utils::browseURL(htmlFile)
# ----------------------------------
# User defined style sheet
# ----------------------------------
sjt.lm(fit1,
       fit2,
       labelDependentVariables = c("Barthel-Index", "Negative Impact"),
       labelPredictors = c("Carer's Age",
                           "Hours of Care",
                           "Carer's Sex",
                           "Educational Status"),
       CSS = list(css.table = "border: 2px solid;",
                  css.tdata = "border: 1px solid;",
                  css.depvarhead = "color:#003399;"))
# ----------------------------------
# automatic grouping of predictors
# ----------------------------------
library(sjmisc)
data(efc)
# make education categorical
efc$c172code <- to_factor(efc$c172code)
# fit first model again (with c172code as factor)
fit1 <- lm(barthtot ~ c160age + c12hour + c172code + c161sex, data=efc)
# fit second model again (with c172code as factor)
fit2 <- lm(neg_c_7 ~ c160age + c12hour + c172code + c161sex, data=efc)
# plot models, but group by predictors
sjt.lm(fit1,
       fit2,
       group.pred = TRUE)
# ----------------------------------------
# compare models with different predictors
# ----------------------------------------
library(sjmisc)
data(efc)
# make education categorical
efc$c172code <- to_factor(efc$c172code)
# make dependency categorical
efc$e42dep <- to_factor(efc$e42dep)
# fit first model
fit1 <- lm(neg_c_7 ~ c160age + c172code + c161sex, data = efc)
# fit second model
fit2 <- lm(neg_c_7 ~ c160age + c172code + c161sex + c12hour, data = efc)
# fit third model
fit3 <- lm(neg_c_7 ~ c160age + c172code + e42dep + tot_sc_e, data = efc)
sjt.lm(fit1, fit2, fit3)
## Fitted models have different coefficients. Grouping may not work properly. Set 'group.pred = FALSE' if you encouter cluttered labelling.
# ----------------------------------------
# compare models with different predictors
# and grouping
# ----------------------------------------
# make COPE-index item categorical
efc$c82cop1 <- to_factor(efc$c82cop1)
# fit another model
fit4 <- lm(neg_c_7 ~ c160age + c172code + e42dep + tot_sc_e + c82cop1,
           data = efc)
sjt.lm(fit1, fit2, fit4, fit3)
## Fitted models have different coefficients. Grouping may not work properly. Set 'group.pred = FALSE' if you encouter cluttered labelling.
# show standardized beta only
sjt.lm(fit1, fit2, fit4, fit3,
       showEst = FALSE,
       showStdBeta = TRUE,
       showAIC = TRUE,
       showFStat = TRUE)
## Fitted models have different coefficients. Grouping may not work properly. Set 'group.pred = FALSE' if you encouter cluttered labelling.
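# a rough plausibility check for the standardized beta of a continuous
# predictor, using the textbook definition b * sd(x) / sd(y); the
# table's values may be computed slightly differently, e.g. regarding
# missing-data handling (a sketch, not part of the original example)
b.age <- coef(fit1)["c160age"]
b.age * sd(efc$c160age, na.rm = TRUE) / sd(efc$neg_c_7, na.rm = TRUE)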
# -----------------------------------------------------------
# color insanity: just to show that each column has its own
# CSS tag, so - depending on the stats and values you show -
# you can define column spacing, margins, borders etc. to
# visually separate the models in the table
# -----------------------------------------------------------
sjt.lm(fit1, fit2, fit4, fit3,
       showStdBeta = TRUE,
       showAIC = TRUE,
       showFStat = TRUE,
       showStdError = TRUE,
       CSS = list(css.modelcolumn1 = 'color:blue;',
                  css.modelcolumn2 = 'color:red;',
                  css.modelcolumn3 = 'color:green;',
                  css.modelcolumn4 = 'color:#ffff00;',
                  css.modelcolumn5 = 'color:#777777;',
                  css.modelcolumn6 = 'color:#3399cc;'))
## Fitted models have different coefficients. Grouping may not work properly. Set 'group.pred = FALSE' if you encouter cluttered labelling.
sjt.lm(fit1, fit2, fit4, fit3,
       showEst = FALSE,
       showStdBeta = TRUE,
       pvaluesAsNumbers = FALSE,
       group.pred = FALSE,
       CSS = list(css.modelcolumn4 = 'border-left:1px solid black;',
                  css.modelcolumn5 = 'padding-right:50px;'))
## End(Not run)

sjt.stackfrq

#Examples
# -------------------------------
# random sample
# -------------------------------
# prepare data for a 4-category Likert scale with 5 items
likert_4 <- data.frame(as.factor(sample(1:4,
                                        500,
                                        replace = TRUE,
                                        prob = c(0.2, 0.3, 0.1, 0.4))),
                       as.factor(sample(1:4,
                                        500,
                                        replace = TRUE,
                                        prob = c(0.5, 0.25, 0.15, 0.1))),
                       as.factor(sample(1:4,
                                        500,
                                        replace = TRUE,
                                        prob = c(0.25, 0.1, 0.4, 0.25))),
                       as.factor(sample(1:4,
                                        500,
                                        replace = TRUE,
                                        prob = c(0.1, 0.4, 0.4, 0.1))),
                       as.factor(sample(1:4,
                                        500,
                                        replace = TRUE,
                                        prob = c(0.35, 0.25, 0.15, 0.25))))
# create labels
levels_4 <- c("Independent",
              "Slightly dependent",
              "Dependent",
              "Severely dependent")
# create item labels
items <- c("Q1", "Q2", "Q3", "Q4", "Q5")
# plot stacked frequencies of 5 (ordered) item-scales
## Not run:
sjt.stackfrq(likert_4, valuelabels = levels_4, varlabels = items)
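# the percentages in the stacked-frequency table can be checked against
# raw per-item proportions (a base-R sketch, not part of the original
# example)
sapply(likert_4, function(x) round(100 * prop.table(table(x)), 1))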
# -------------------------------
# Data from the EUROFAMCARE sample dataset
# Auto-detection of labels
# -------------------------------
library(sjmisc)
data(efc)
# retrieve first item of COPE-index scale
start <- which(colnames(efc) == "c82cop1")
# retrieve last item of COPE-index scale
end <- which(colnames(efc) == "c90cop9")
sjt.stackfrq(efc[, c(start:end)],
             alternateRowColors = TRUE)
sjt.stackfrq(efc[, c(start:end)],
             alternateRowColors = TRUE,
             showN = TRUE,
             showNA = TRUE)
# --------------------------------
# User defined style sheet
# --------------------------------
sjt.stackfrq(efc[, c(start:end)],
             alternateRowColors = TRUE,
             showTotalN = TRUE,
             showSkew = TRUE,
             showKurtosis = TRUE,
             CSS = list(css.ncol = "border-left:1px dotted black;",
                        css.summary = "font-style:italic;"))
## End(Not run)

sjt.xtab

#Examples
# prepare sample data set
library(sjmisc)
data(efc)
efc.labels <- get_labels(efc)
# print simple cross table w/o labels
## Not run:
sjt.xtab(efc$e16sex, efc$e42dep)
# print cross table with manually set
# labels and expected values
sjt.xtab(efc$e16sex,
         efc$e42dep,
         variableLabels = c("Elder's gender",
                            "Elder's dependency"),
         valueLabels = list(efc.labels[['e16sex']],
                            efc.labels[['e42dep']]),
         showExpected = TRUE)
# print minimal cross table with labels, total col/row highlighted
sjt.xtab(efc$e16sex, efc$e42dep,
         variableLabels = c("Elder's gender", "Elder's dependency"),
         valueLabels = list(efc.labels[['e16sex']], efc.labels[['e42dep']]),
         showHorizontalLine = FALSE,
         showCellPerc = FALSE,
         highlightTotal = TRUE)
# -------------------------------
# auto-detection of labels
# -------------------------------
# print cross table with labels and all percentages
sjt.xtab(efc$e16sex, efc$e42dep,
         showRowPerc = TRUE,
         showColPerc = TRUE)
# print cross table with labels and all percentages, including
# grouping variable
sjt.xtab(efc$e16sex, efc$e42dep, efc$c161sex,
         variableLabels=c("Elder's gender",
                          "Elder's dependency",
                          "Carer's gender"),
         valueLabels=list(efc.labels[['e16sex']],
                          efc.labels[['e42dep']],
                          efc.labels[['c161sex']]),
         showRowPerc = TRUE,
         showColPerc = TRUE)
## Warning in chisq.test(tab): Chi-squared approximation may be incorrect
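# sjt.xtab runs chisq.test() internally (hence the warning above); the
# test for the two-way table can also be computed directly with base R
# (a sketch, not part of the original example)
tab <- table(efc$e16sex, efc$e42dep)
chisq.test(tab)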
# ----------------------------------------------------------------
# User defined style sheet
# ----------------------------------------------------------------
sjt.xtab(efc$e16sex, efc$e42dep,
         variableLabels = c("Elder's gender", "Elder's dependency"),
         valueLabels = list(efc.labels[['e16sex']],
                            efc.labels[['e42dep']]),
         CSS = list(css.table = "border: 2px solid;",
                    css.tdata = "border: 1px solid;",
                    css.horline = "border-bottom: double blue;"))
## End(Not run)

view_df

## Not run:
# init dataset
library(sjmisc)
data(efc)
# view variables
view_df(efc)
# view variables w/o values and value labels
view_df(efc, showValues = FALSE, showValueLabels = FALSE)
# view variables including variable type, ordered by name
view_df(efc, sortByName = TRUE, showType = TRUE)
# ----------------------------------------------------------------
# User defined style sheet
# ----------------------------------------------------------------
view_df(efc,
        CSS = list(css.table = "border: 2px solid;",
                   css.tdata = "border: 1px solid;",
                   css.arc = "color:blue;"))
## End(Not run)


data(iris)
view_df(iris,
        CSS = list(css.table = "border: 2px solid;",
                        css.tdata = "border: 1px solid;",
                        css.arc = "color:blue;"))
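
# The same style sheet is passed to both calls above; as a minimal
# sketch (assuming nothing beyond the CSS argument already shown),
# it can be defined once and reused:
my.css <- list(css.table = "border: 2px solid;",
               css.tdata = "border: 1px solid;",
               css.arc = "color:blue;")
view_df(efc, CSS = my.css)
view_df(iris, CSS = my.css)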

adjust_plot_range

#Examples
# sample data set
library(sjmisc)
data(efc)
# show frequencies of relationship-variable and
# retrieve plot object
gp <- sjp.frq(efc$e15relat, printPlot = FALSE)
# show current plot
plot(gp$plot)
# show adjusted plot
adjust_plot_range(gp$plot)
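
# Because gp$plot is a regular ggplot object (as used above), it can
# also be themed with ggplot2 before adjusting the range. A sketch,
# assuming only standard ggplot2 layering, not an sjPlot feature:
library(ggplot2)
adjust_plot_range(gp$plot + theme_classic())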

dist_chisq

#Examples
# a simple chi-squared distribution
# for 6 degrees of freedom
dist_chisq(deg.f = 6)

# a chi-squared distribution for 6 degrees of freedom,
# and a shaded area starting at a chi-squared value of ten.
# With a df of 6, a chi-squared value of 12.59 would be "significant",
# thus the shaded area from 10 to 12.58 is filled as "non-significant",
# while the area starting from chi-squared value 12.59 is filled as
# "significant"
dist_chisq(chi2 = 10, deg.f = 6)

# a chi-squared distribution for 6 degrees of freedom,
# and a shaded area starting at that chi-squared value, which has
# a p-level of about 0.125 (which equals a chi-squared value of about 10).
# With a df of 6, a chi-squared value of 12.59 would be "significant",
# thus the shaded area from 10 to 12.58 (p-level 0.125 to p-level 0.05)
# is filled as "non-significant", while the area starting from chi-squared
# value 12.59 (p-level < 0.05) is filled as "significant".
dist_chisq(p = 0.125, deg.f = 6)
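
# The critical value and the p-level quoted in the comments can be
# verified with base R (not part of sjPlot):
qchisq(0.95, df = 6)                    # critical chi-squared value, ~12.59
pchisq(10, df = 6, lower.tail = FALSE)  # p-level for chi-squared = 10, ~0.125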

dist_f

#Examples
# a simple F distribution for 6 and 45 degrees of freedom
dist_f(deg.f1 = 6, deg.f2 = 45)

# F distribution for 6 and 45 degrees of freedom,
# and a shaded area starting at an F-value of two.
# F-values equal to or greater than 2.31 are "significant"
dist_f(f = 2, deg.f1 = 6, deg.f2 = 45)

# F distribution for 6 and 45 degrees of freedom,
# and a shaded area starting at a p-level of 0.2
# (F-Value about 1.5).
dist_f(p = 0.2, deg.f1 = 6, deg.f2 = 45)
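
# Again, the quoted F-values can be verified with base R:
qf(0.95, df1 = 6, df2 = 45)                     # critical F-value, ~2.31
qf(0.2, df1 = 6, df2 = 45, lower.tail = FALSE)  # F-value at p-level 0.2, ~1.5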

dist_norm

#Examples
# a simple normal distribution
dist_norm()

# a normal distribution with a different mean and sd.
# note that the curve looks similar to the plot above,
# but the axis range has changed.
dist_norm(mean = 2, sd = 4)

# a standard normal distribution with a shaded area
# starting at the value of one
dist_norm(norm = 1)

# a standard normal distribution with a shaded area
# starting at the value that has a p-level of 0.2
dist_norm(p = 0.2)
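
# Assuming the p argument is one-tailed, as in the dist_t example
# below (an assumption by analogy, not confirmed), the corresponding
# z-value can be checked with base R:
qnorm(0.2, lower.tail = FALSE)   # z-value at a one-tailed p-level of 0.2, ~0.84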

dist_t

#Examples
# a simple t-distribution
# for 6 degrees of freedom
dist_t(deg.f = 6)

# a t-distribution for 6 degrees of freedom,
# and a shaded area starting at a t-value of one.
# With a df of 6, a t-value of 1.94 would be "significant".
dist_t(t = 1, deg.f = 6)

# a t-distribution for 6 degrees of freedom,
# and a shaded area starting at p-level of 0.4
# (t-value of about 0.26).
dist_t(p = 0.4, deg.f = 6)
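
# The t-values quoted in the comments can be verified with base R:
qt(0.95, df = 6)                     # critical t-value, ~1.94
qt(0.4, df = 6, lower.tail = FALSE)  # t-value at p-level 0.4, ~0.26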

sjc.cluster

#Examples
# Hierarchical clustering of mtcars-dataset
groups <- sjc.cluster(mtcars, 5)
# K-means clustering of mtcars-dataset
groups <- sjc.cluster(mtcars, 5, method="k")
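
# The return value is a vector of group assignments, so it can be
# summarized with plain base R; a small sketch:
table(groups)                                               # group sizes
aggregate(mtcars, by = list(cluster = groups), FUN = mean)  # cluster profiles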

sjc.dend

#Examples
# Plot dendrogram of hierarchical clustering of mtcars-dataset
# and show group classification
sjc.dend(mtcars, 5)

# Plot dendrogram of hierarchical clustering of mtcars-dataset
# and show group classification for 2 to 4 groups
sjc.dend(mtcars, 2:4)

sjc.elbow

#Examples
# plot elbow values of mtcars dataset
sjc.elbow(mtcars)

sjc.grpdisc

#Examples
# retrieve group classification from hierarchical cluster analysis
# on the mtcars data set (5 groups)
groups <- sjc.cluster(mtcars, 5)
# plot goodness of group classification
sjc.grpdisc(mtcars, groups, 5)
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.

sjc.kgap

#Examples
## Not run:
# plot gap statistic and determine best number of clusters
# in mtcars dataset
sjc.kgap(mtcars)
## Clustering Gap statistic ["clusGap"].
## B=100 simulated reference sets, k = 1..10
## 
##  --> Number of clusters (method 'Tibs2001SEmax', SE.factor=1): 2

# and in iris dataset
sjc.kgap(iris[,1:4])
## Clustering Gap statistic ["clusGap"].
## B=100 simulated reference sets, k = 1..10
## 
##  --> Number of clusters (method 'Tibs2001SEmax', SE.factor=1): 3

## End(Not run)
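
# The output above comes from the gap statistic in the cluster package.
# A rough sketch of the underlying call; the exact defaults sjc.kgap
# uses (e.g. scaling, nstart) are assumptions here, not confirmed:
library(cluster)
set.seed(123)   # the gap statistic relies on simulated reference sets
gap <- clusGap(scale(mtcars), FUN = kmeans, nstart = 25, K.max = 10, B = 100)
maxSE(gap$Tab[, "gap"], gap$Tab[, "SE.sim"],
      method = "Tibs2001SEmax", SE.factor = 1)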

sjc.qclus

#Examples
## Not run:
# k-means clustering of mtcars-dataset
sjc.qclus(mtcars)
## Clustering Gap statistic ["clusGap"].
## B=100 simulated reference sets, k = 1..10
## 
##  --> Number of clusters (method 'Tibs2001SEmax', SE.factor=1): 2
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.

sjc.qclus(iris[,1:4])
## Clustering Gap statistic ["clusGap"].
## B=100 simulated reference sets, k = 1..10
## 
##  --> Number of clusters (method 'Tibs2001SEmax', SE.factor=1): 3
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.

# k-means clustering of the airquality dataset with 4 pre-defined
# groups in a faceted panel
sjc.qclus(airquality,
          groupcount = 4,
          facetCluster = TRUE)
## Warning: `show_guide` has been deprecated. Please use `show.legend`
## instead.

## End(Not run)

# k-means clustering of airquality data
# and saving the results. most likely, 3 cluster
# groups have been found (see below).
airgrp <- sjc.qclus(airquality)

# "re-plot" cluster groups, without computing
# a new k-means cluster analysis.
sjc.qclus(airquality,
          groupcount = 3,
          groups = airgrp$classification)