library(rio) # R package to import and export datasets
library(tidyverse) # R package to wrangle data
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr 1.1.4 ✔ readr 2.1.6
## ✔ forcats 1.0.1 ✔ stringr 1.6.0
## ✔ ggplot2 4.0.1 ✔ tibble 3.3.1
## ✔ lubridate 1.9.4 ✔ tidyr 1.3.2
## ✔ purrr 1.2.1
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(psych) # R package to calculate internal consistency
##
## Attaching package: 'psych'
##
## The following objects are masked from 'package:ggplot2':
##
## %+%, alpha
library(irr) # R package to calculate inter-rater reliability
## Loading required package: lpSolve
We will use the same dataset (Nerdy scale) used in class, “ITLS_4160_internal_consistency.xlsx.”
Calculate Cronbach’s alpha for 16 items (Q1–Q16).
nerdy_data <- import("ITLS_4160_internal_consistency.xlsx")
head(nerdy_data)
## Q1 Q2 Q3 Q4 Q5 Q6 Q7 Q8 Q9 Q10 Q11 Q12 Q13 Q14 Q15 Q16 Q17 Q18 Q19 Q20 Q21
## 1 3 5 3 3 5 5 5 3 5 5 4 5 5 5 3 5 4 5 5 5 5
## 2 4 4 4 3 5 2 5 1 4 4 1 5 4 4 1 3 3 3 1 3 3
## 3 5 5 5 5 5 5 5 5 5 5 5 4 5 5 4 5 5 4 5 5 5
## 4 5 5 5 5 5 5 5 3 5 5 5 5 4 4 4 2 5 5 5 5 4
## 5 4 4 4 4 4 4 4 4 4 5 4 4 5 5 1 1 5 5 3 5 5
## 6 4 4 4 4 4 4 5 3 3 4 4 4 3 5 3 4 3 4 5 4 4
## Q22 Q23 Q24 Q25 Q26 country introelapse testelapse surveyelapse race_arab
## 1 5 5 5 5 5 US 41 93 125 0
## 2 3 4 4 4 5 GB 13 131 161 0
## 3 5 5 5 3 5 PL 2 49 87 0
## 4 1 5 5 5 5 US 26 710 228 0
## 5 4 4 5 4 0 US 6 195 424 0
## 6 2 5 5 3 4 US 410 117 113 0
## race_asian race_black race_white race_hispanic race_nativeam race_nativeau
## 1 0 0 1 0 0 0
## 2 0 0 1 0 0 0
## 3 0 0 1 0 0 0
## 4 0 0 1 0 0 0
## 5 0 0 1 0 0 0
## 6 0 0 1 0 0 0
## race_other nerdy ASD
## 1 0 7 2
## 2 0 6 2
## 3 0 7 2
## 4 0 7 2
## 5 0 6 2
## 6 0 5 2
nerdy_data %>%
select(Q1:Q16) %>%
describe()
## vars n mean sd median trimmed mad min max range skew kurtosis se
## Q1 1 1000 3.93 1.11 4 4.10 1.48 0 5 5 -1.05 0.58 0.04
## Q2 2 1000 4.00 1.25 4 4.21 1.48 0 5 5 -1.18 0.40 0.04
## Q3 3 1000 4.17 1.06 4 4.38 1.48 0 5 5 -1.49 1.94 0.03
## Q4 4 1000 3.73 1.26 4 3.88 1.48 0 5 5 -0.73 -0.50 0.04
## Q5 5 1000 3.81 1.22 4 3.97 1.48 0 5 5 -0.91 -0.04 0.04
## Q6 6 1000 3.66 1.20 4 3.79 1.48 0 5 5 -0.68 -0.31 0.04
## Q7 7 1000 4.13 1.14 4 4.35 1.48 0 5 5 -1.40 1.35 0.04
## Q8 8 1000 3.80 1.36 4 4.00 1.48 0 5 5 -0.93 -0.35 0.04
## Q9 9 1000 3.93 1.11 4 4.09 1.48 0 5 5 -0.95 0.28 0.04
## Q10 10 1000 4.08 1.05 4 4.24 1.48 0 5 5 -1.17 1.11 0.03
## Q11 11 1000 3.14 1.57 3 3.18 2.97 0 5 5 -0.21 -1.47 0.05
## Q12 12 1000 3.80 1.25 4 3.97 1.48 0 5 5 -0.89 -0.16 0.04
## Q13 13 1000 3.65 1.32 4 3.80 1.48 0 5 5 -0.60 -0.77 0.04
## Q14 14 1000 3.63 1.20 4 3.74 1.48 0 5 5 -0.58 -0.49 0.04
## Q15 15 1000 2.94 1.51 3 2.93 1.48 0 5 5 -0.01 -1.41 0.05
## Q16 16 1000 3.48 1.42 4 3.60 1.48 0 5 5 -0.47 -1.11 0.04
nerdy_data %>%
select(Q1:Q16) %>%
alpha()
##
## Reliability analysis
## Call: alpha(x = .)
##
## raw_alpha std.alpha G6(smc) average_r S/N ase mean sd median_r
## 0.8 0.8 0.82 0.2 4 0.0094 3.7 0.63 0.19
##
## 95% confidence boundaries
## lower alpha upper
## Feldt 0.78 0.8 0.82
## Duhachek 0.78 0.8 0.82
##
## Reliability if an item is dropped:
## raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r med.r
## Q1 0.78 0.79 0.81 0.20 3.7 0.0101 0.011 0.18
## Q2 0.79 0.79 0.81 0.20 3.8 0.0099 0.012 0.20
## Q3 0.79 0.79 0.82 0.20 3.8 0.0098 0.011 0.19
## Q4 0.78 0.79 0.81 0.20 3.7 0.0102 0.011 0.18
## Q5 0.79 0.79 0.81 0.20 3.8 0.0098 0.011 0.18
## Q6 0.79 0.79 0.81 0.20 3.8 0.0099 0.012 0.19
## Q7 0.79 0.79 0.81 0.20 3.9 0.0097 0.010 0.19
## Q8 0.78 0.79 0.81 0.20 3.7 0.0101 0.011 0.18
## Q9 0.79 0.79 0.81 0.20 3.8 0.0098 0.011 0.18
## Q10 0.79 0.79 0.82 0.20 3.9 0.0097 0.012 0.18
## Q11 0.79 0.80 0.82 0.21 3.9 0.0096 0.012 0.19
## Q12 0.79 0.79 0.81 0.20 3.7 0.0099 0.011 0.19
## Q13 0.78 0.79 0.81 0.20 3.7 0.0102 0.012 0.18
## Q14 0.79 0.79 0.82 0.20 3.8 0.0098 0.012 0.19
## Q15 0.79 0.80 0.82 0.21 3.9 0.0096 0.011 0.19
## Q16 0.79 0.79 0.82 0.20 3.8 0.0098 0.011 0.19
##
## Item statistics
## n raw.r std.r r.cor r.drop mean sd
## Q1 1000 0.57 0.57 0.55 0.49 3.9 1.1
## Q2 1000 0.50 0.49 0.44 0.40 4.0 1.2
## Q3 1000 0.47 0.48 0.42 0.38 4.2 1.1
## Q4 1000 0.57 0.58 0.55 0.48 3.7 1.3
## Q5 1000 0.49 0.51 0.46 0.39 3.8 1.2
## Q6 1000 0.49 0.51 0.46 0.39 3.7 1.2
## Q7 1000 0.44 0.46 0.42 0.35 4.1 1.1
## Q8 1000 0.56 0.54 0.50 0.45 3.8 1.4
## Q9 1000 0.48 0.51 0.47 0.39 3.9 1.1
## Q10 1000 0.43 0.46 0.39 0.34 4.1 1.0
## Q11 1000 0.47 0.44 0.37 0.34 3.1 1.6
## Q12 1000 0.52 0.53 0.49 0.42 3.8 1.2
## Q13 1000 0.57 0.57 0.54 0.47 3.6 1.3
## Q14 1000 0.48 0.49 0.43 0.38 3.6 1.2
## Q15 1000 0.46 0.43 0.37 0.33 2.9 1.5
## Q16 1000 0.49 0.48 0.42 0.38 3.5 1.4
##
## Non missing response frequency for each item
## 0 1 2 3 4 5 miss
## Q1 0.00 0.05 0.06 0.17 0.36 0.37 0
## Q2 0.00 0.06 0.08 0.11 0.26 0.48 0
## Q3 0.00 0.04 0.05 0.10 0.33 0.48 0
## Q4 0.00 0.07 0.12 0.17 0.28 0.36 0
## Q5 0.00 0.06 0.10 0.15 0.33 0.35 0
## Q6 0.00 0.06 0.11 0.22 0.31 0.30 0
## Q7 0.01 0.04 0.06 0.10 0.29 0.50 0
## Q8 0.00 0.10 0.10 0.11 0.28 0.42 0
## Q9 0.00 0.03 0.08 0.17 0.33 0.38 0
## Q10 0.00 0.03 0.04 0.18 0.30 0.44 0
## Q11 0.00 0.26 0.10 0.15 0.21 0.28 0
## Q12 0.00 0.06 0.11 0.14 0.31 0.37 0
## Q13 0.00 0.09 0.12 0.22 0.21 0.37 0
## Q14 0.00 0.06 0.12 0.23 0.29 0.29 0
## Q15 0.00 0.26 0.16 0.17 0.20 0.21 0
## Q16 0.00 0.14 0.14 0.17 0.22 0.33 0
Let’s interpret Cronbach’s α based on Nunnally’s (1978) interpretation guideline.
≥ .90: Excellent (but may indicate item redundancy)
.80 – .89: Good
.70 – .79: Acceptable
.60 – .69: Questionable
.50 – .59: Poor
< .50: Unacceptable
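As a quick sanity check on any alpha we compute, a small helper (hypothetical, not part of psych) can translate an alpha value into Nunnally’s labels:
# Hypothetical helper: map a Cronbach's alpha value to Nunnally's (1978) label
interpret_alpha <- function(a) {
  dplyr::case_when(
    a >= .90 ~ "Excellent (check for item redundancy)",
    a >= .80 ~ "Good",
    a >= .70 ~ "Acceptable",
    a >= .60 ~ "Questionable",
    a >= .50 ~ "Poor",
    TRUE ~ "Unacceptable"
  )
}
interpret_alpha(0.80) # "Good"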
Interpretation: Sixteen items (Q1–Q16) of the Nerdy Personality Attributes Scale yielded Cronbach’s α = .80. According to Nunnally’s (1978) guideline, this falls in the Good range: the 16 items are internally consistent, with the majority of items correlating well with one another and appearing to measure the same construct.
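If you need the coefficient as a number rather than as printed output, the list returned by psych’s alpha() stores it under total (a minimal sketch; the element names mirror the printed summary):
alpha_16 <- nerdy_data %>%
  select(Q1:Q16) %>%
  alpha()
alpha_16$total$raw_alpha # 0.80 for these 16 items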
# Task 1-2. Calculate Cronbach’s alpha (Q1 - Q26)
We will use the same dataset (Nerdy scale) used in class, “ITLS_4160_internal_consistency.xlsx,” but this time calculate Cronbach’s alpha for all 26 items (Q1–Q26).
nerdy_data %>%
select(Q1:Q26) %>%
describe()
## vars n mean sd median trimmed mad min max range skew kurtosis se
## Q1 1 1000 3.93 1.11 4 4.10 1.48 0 5 5 -1.05 0.58 0.04
## Q2 2 1000 4.00 1.25 4 4.21 1.48 0 5 5 -1.18 0.40 0.04
## Q3 3 1000 4.17 1.06 4 4.38 1.48 0 5 5 -1.49 1.94 0.03
## Q4 4 1000 3.73 1.26 4 3.88 1.48 0 5 5 -0.73 -0.50 0.04
## Q5 5 1000 3.81 1.22 4 3.97 1.48 0 5 5 -0.91 -0.04 0.04
## Q6 6 1000 3.66 1.20 4 3.79 1.48 0 5 5 -0.68 -0.31 0.04
## Q7 7 1000 4.13 1.14 4 4.35 1.48 0 5 5 -1.40 1.35 0.04
## Q8 8 1000 3.80 1.36 4 4.00 1.48 0 5 5 -0.93 -0.35 0.04
## Q9 9 1000 3.93 1.11 4 4.09 1.48 0 5 5 -0.95 0.28 0.04
## Q10 10 1000 4.08 1.05 4 4.24 1.48 0 5 5 -1.17 1.11 0.03
## Q11 11 1000 3.14 1.57 3 3.18 2.97 0 5 5 -0.21 -1.47 0.05
## Q12 12 1000 3.80 1.25 4 3.97 1.48 0 5 5 -0.89 -0.16 0.04
## Q13 13 1000 3.65 1.32 4 3.80 1.48 0 5 5 -0.60 -0.77 0.04
## Q14 14 1000 3.63 1.20 4 3.74 1.48 0 5 5 -0.58 -0.49 0.04
## Q15 15 1000 2.94 1.51 3 2.93 1.48 0 5 5 -0.01 -1.41 0.05
## Q16 16 1000 3.48 1.42 4 3.60 1.48 0 5 5 -0.47 -1.11 0.04
## Q17 17 1000 3.86 1.17 4 4.02 1.48 0 5 5 -0.90 0.05 0.04
## Q18 18 1000 3.89 1.35 4 4.12 1.48 0 5 5 -1.06 -0.12 0.04
## Q19 19 1000 3.42 1.56 4 3.52 1.48 0 5 5 -0.43 -1.35 0.05
## Q20 20 1000 3.62 1.18 4 3.72 1.48 0 5 5 -0.48 -0.60 0.04
## Q21 21 1000 2.97 1.63 3 2.96 2.97 0 5 5 -0.02 -1.61 0.05
## Q22 22 1000 2.42 1.38 2 2.28 1.48 0 5 5 0.57 -0.94 0.04
## Q23 23 1000 3.85 1.38 4 4.07 1.48 0 5 5 -0.95 -0.39 0.04
## Q24 24 1000 4.24 0.99 5 4.42 0.00 0 5 5 -1.57 2.63 0.03
## Q25 25 1000 3.10 1.41 3 3.13 1.48 0 5 5 -0.17 -1.26 0.04
## Q26 26 1000 4.07 1.11 4 4.26 1.48 0 5 5 -1.16 0.64 0.04
nerdy_data %>%
select(Q1:Q26) %>%
alpha()
##
## Reliability analysis
## Call: alpha(x = .)
##
## raw_alpha std.alpha G6(smc) average_r S/N ase mean sd median_r
## 0.85 0.86 0.88 0.19 6 0.0069 3.7 0.59 0.18
##
## 95% confidence boundaries
## lower alpha upper
## Feldt 0.84 0.85 0.86
## Duhachek 0.84 0.85 0.86
##
## Reliability if an item is dropped:
## raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r med.r
## Q1 0.84 0.85 0.88 0.18 5.6 0.0072 0.013 0.17
## Q2 0.84 0.85 0.88 0.19 5.7 0.0071 0.013 0.18
## Q3 0.85 0.85 0.88 0.19 5.8 0.0071 0.013 0.18
## Q4 0.84 0.85 0.88 0.18 5.6 0.0072 0.013 0.17
## Q5 0.84 0.85 0.88 0.18 5.6 0.0072 0.013 0.17
## Q6 0.84 0.85 0.88 0.18 5.6 0.0072 0.013 0.17
## Q7 0.85 0.85 0.88 0.19 5.7 0.0071 0.013 0.18
## Q8 0.84 0.85 0.88 0.19 5.7 0.0072 0.013 0.17
## Q9 0.84 0.85 0.88 0.18 5.6 0.0072 0.012 0.17
## Q10 0.85 0.85 0.88 0.19 5.8 0.0071 0.014 0.17
## Q11 0.85 0.85 0.88 0.19 5.8 0.0071 0.013 0.18
## Q12 0.84 0.85 0.88 0.18 5.6 0.0072 0.013 0.17
## Q13 0.84 0.85 0.87 0.18 5.6 0.0073 0.013 0.17
## Q14 0.84 0.85 0.88 0.18 5.7 0.0072 0.013 0.17
## Q15 0.85 0.85 0.88 0.19 5.9 0.0070 0.013 0.18
## Q16 0.84 0.85 0.88 0.19 5.8 0.0071 0.013 0.18
## Q17 0.84 0.85 0.88 0.18 5.7 0.0072 0.013 0.18
## Q18 0.85 0.86 0.88 0.19 6.0 0.0069 0.013 0.18
## Q19 0.85 0.86 0.88 0.19 5.9 0.0069 0.012 0.18
## Q20 0.84 0.85 0.88 0.19 5.7 0.0072 0.013 0.17
## Q21 0.85 0.85 0.88 0.19 5.9 0.0070 0.013 0.18
## Q22 0.84 0.85 0.88 0.19 5.7 0.0071 0.013 0.18
## Q23 0.84 0.85 0.88 0.19 5.7 0.0072 0.013 0.17
## Q24 0.84 0.85 0.88 0.18 5.7 0.0072 0.013 0.17
## Q25 0.85 0.85 0.88 0.19 5.8 0.0070 0.013 0.18
## Q26 0.84 0.85 0.88 0.18 5.6 0.0072 0.013 0.17
##
## Item statistics
## n raw.r std.r r.cor r.drop mean sd
## Q1 1000 0.52 0.52 0.50 0.46 3.9 1.11
## Q2 1000 0.46 0.45 0.42 0.39 4.0 1.25
## Q3 1000 0.40 0.41 0.38 0.34 4.2 1.06
## Q4 1000 0.53 0.53 0.51 0.46 3.7 1.26
## Q5 1000 0.52 0.53 0.51 0.45 3.8 1.22
## Q6 1000 0.52 0.54 0.51 0.46 3.7 1.20
## Q7 1000 0.43 0.45 0.43 0.37 4.1 1.14
## Q8 1000 0.51 0.49 0.47 0.44 3.8 1.36
## Q9 1000 0.52 0.54 0.53 0.47 3.9 1.11
## Q10 1000 0.42 0.45 0.41 0.37 4.1 1.05
## Q11 1000 0.45 0.41 0.39 0.36 3.1 1.57
## Q12 1000 0.51 0.52 0.50 0.44 3.8 1.25
## Q13 1000 0.57 0.58 0.56 0.51 3.6 1.32
## Q14 1000 0.49 0.50 0.47 0.42 3.6 1.20
## Q15 1000 0.40 0.37 0.33 0.32 2.9 1.51
## Q16 1000 0.46 0.44 0.41 0.38 3.5 1.42
## Q17 1000 0.50 0.50 0.48 0.44 3.9 1.17
## Q18 1000 0.31 0.32 0.26 0.23 3.9 1.35
## Q19 1000 0.36 0.33 0.30 0.27 3.4 1.56
## Q20 1000 0.48 0.49 0.46 0.41 3.6 1.18
## Q21 1000 0.40 0.37 0.33 0.31 3.0 1.63
## Q22 1000 0.45 0.45 0.42 0.38 2.4 1.38
## Q23 1000 0.50 0.50 0.47 0.43 3.9 1.38
## Q24 1000 0.48 0.51 0.48 0.43 4.2 0.99
## Q25 1000 0.39 0.39 0.35 0.31 3.1 1.41
## Q26 1000 0.51 0.53 0.51 0.45 4.1 1.11
##
## Non missing response frequency for each item
## 0 1 2 3 4 5 miss
## Q1 0.00 0.05 0.06 0.17 0.36 0.37 0
## Q2 0.00 0.06 0.08 0.11 0.26 0.48 0
## Q3 0.00 0.04 0.05 0.10 0.33 0.48 0
## Q4 0.00 0.07 0.12 0.17 0.28 0.36 0
## Q5 0.00 0.06 0.10 0.15 0.33 0.35 0
## Q6 0.00 0.06 0.11 0.22 0.31 0.30 0
## Q7 0.01 0.04 0.06 0.10 0.29 0.50 0
## Q8 0.00 0.10 0.10 0.11 0.28 0.42 0
## Q9 0.00 0.03 0.08 0.17 0.33 0.38 0
## Q10 0.00 0.03 0.04 0.18 0.30 0.44 0
## Q11 0.00 0.26 0.10 0.15 0.21 0.28 0
## Q12 0.00 0.06 0.11 0.14 0.31 0.37 0
## Q13 0.00 0.09 0.12 0.22 0.21 0.37 0
## Q14 0.00 0.06 0.12 0.23 0.29 0.29 0
## Q15 0.00 0.26 0.16 0.17 0.20 0.21 0
## Q16 0.00 0.14 0.14 0.17 0.22 0.33 0
## Q17 0.00 0.04 0.10 0.17 0.31 0.37 0
## Q18 0.00 0.10 0.07 0.10 0.26 0.46 0
## Q19 0.00 0.19 0.12 0.13 0.17 0.38 0
## Q20 0.00 0.05 0.12 0.27 0.26 0.29 0
## Q21 0.00 0.30 0.14 0.09 0.20 0.27 0
## Q22 0.00 0.34 0.24 0.17 0.13 0.12 0
## Q23 0.01 0.09 0.11 0.11 0.22 0.47 0
## Q24 0.00 0.02 0.03 0.12 0.32 0.51 0
## Q25 0.00 0.19 0.17 0.19 0.25 0.20 0
## Q26 0.00 0.03 0.08 0.13 0.29 0.47 0
Let’s interpret Cronbach’s α based on Nunnally’s (1978) interpretation guideline.
≥ .90: Excellent (but may indicate item redundancy)
.80 – .89: Good
.70 – .79: Acceptable
.60 – .69: Questionable
.50 – .59: Poor
< .50: Unacceptable
Interpretation: Twenty-six items (Q1–Q26) of the Nerdy Personality Attributes Scale yielded Cronbach’s α = .85 (standardized α = .86). According to Nunnally’s (1978) guideline, this also falls in the Good range. Alpha rose from .80 (16 items) to .85 (26 items), which illustrates a general property of the coefficient: adding items that correlate with the rest of the scale tends to raise alpha.
Q1–Q6 (6 items): Cronbach’s alpha = .70
Q1–Q16 (16 items): Cronbach’s alpha = .80
Q1–Q26 (26 items): Cronbach’s alpha = .86
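A minimal sketch to reproduce this pattern in one pass, building each item subset with all_of() (the raw alphas should land near the figures above; the printed 26-item raw alpha is .85, while .86 is the standardized alpha):
sapply(c(6, 16, 26), function(k) {
  items <- nerdy_data %>% select(all_of(paste0("Q", 1:k))) # items Q1..Qk
  round(alpha(items)$total$raw_alpha, 2)                   # raw alpha for the first k items
})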
True. This is mostly true, as long as the items are somewhat related. Cronbach’s alpha is influenced by two things: the average correlation between items and the number of items on the scale. However, if new items are added that are poorly related to the rest, alpha will not necessarily go up.
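The standardized-alpha formula makes these two ingredients explicit: alpha = k * r_bar / (1 + (k - 1) * r_bar), where k is the number of items and r_bar is the average inter-item correlation. A quick check against the average_r values printed above:
# Standardized Cronbach's alpha from item count k and average inter-item correlation r_bar
std_alpha <- function(k, r_bar) k * r_bar / (1 + (k - 1) * r_bar)
std_alpha(16, 0.20) # 0.80, matches the 16-item std.alpha
std_alpha(26, 0.19) # ~0.86, matches the 26-item std.alpha
std_alpha(26, 0.05) # ~0.58: more items barely help when they barely correlate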
False. Reliability is not independent of the sample or of test features. Reliability can change depending on who takes the test and on the variability of the test scores: more variability often means higher reliability, less variability often means lower reliability. Change the sample or the test, and the reliability estimate changes.
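One way to see this with the data at hand (an illustration, assuming high scorers form a restricted-range subgroup): recompute alpha using only respondents with high total scores, and the estimate will typically drop below the full-sample .80.
high_scorers <- nerdy_data %>%
  select(Q1:Q16) %>%
  filter(rowSums(across(everything())) >= 64) # keep respondents averaging 4+ per item
alpha(high_scorers)$total$raw_alpha # typically lower than the full-sample .80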
We will use the writing-scores dataset used in class, “ITLS_4160_Inter_rater_reliability.xlsx.”
Let’s calculate the inter-rater reliability between Rater 1 and Rater 3.
writing_scores <- import("ITLS_4160_Inter_rater_reliability.xlsx")
head(writing_scores)
## Person Rater1 Rater2 Rater3
## 1 1 1 1 1
## 2 2 2 1 3
## 3 3 2 2 3
## 4 4 3 3 4
## 5 5 3 2 4
## 6 6 3 3 4
writing_scores %>%
select(Rater1, Rater3) %>%
describe()
## vars n mean sd median trimmed mad min max range skew kurtosis se
## Rater1 1 10 3.2 1.32 3 3.25 1.48 1 5 4 -0.06 -1.36 0.42
## Rater3 2 10 3.8 1.23 4 4.00 1.48 1 5 4 -0.96 0.00 0.39
writing_scores %>%
select(Rater1, Rater3) %>%
kappa2()
## Cohen's Kappa for 2 Raters (Weights: unweighted)
##
## Subjects = 10
## Raters = 2
## Kappa = 0.241
##
## z = 1.58
## p-value = 0.113
Please interpret the inter-rater reliability between Rater 1 and Rater 3 based on Landis and Koch’s (1977) interpretation guideline.
< 0.00 Poor agreement
0.00 – 0.20 Slight agreement
0.21 – 0.40 Fair agreement
0.41 – 0.60 Moderate agreement
0.61 – 0.80 Substantial agreement
0.81 – 1.00 Almost perfect agreement
Interpretation: Cohen’s kappa indicated fair agreement between Rater 1 and Rater 3 (κ = .24), according to Landis and Koch’s (1977) guideline.
The kappa between Rater 1 and Rater 3 was .24: better than chance, but only fair, not moderate, substantial, or almost perfect. Note that this is a different question from internal consistency. If you have several questions that are supposed to measure one construct, Cronbach’s alpha tells you whether those questions are working together, and it suits continuous or Likert-type item data; it can also be inflated by many very similar or near-duplicate items. Kappa, by contrast, measures chance-corrected agreement between raters on categorical ratings.
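Because the writing scores are ordinal (1–5), a weighted kappa that gives partial credit to near-misses can be more informative than the unweighted version; irr’s kappa2() supports this through its weight argument:
writing_scores %>%
  select(Rater1, Rater3) %>%
  kappa2(weight = "squared") # squared weights penalize large disagreements more heavily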
We will use the same writing-scores dataset, “ITLS_4160_Inter_rater_reliability.xlsx.”
Let’s calculate the inter-rater reliability between Rater 2 and Rater 3.
writing_scores %>%
select(Rater2, Rater3) %>%
kappa2()
## Cohen's Kappa for 2 Raters (Weights: unweighted)
##
## Subjects = 10
## Raters = 2
## Kappa = 0.268
##
## z = 1.96
## p-value = 0.0504
Please interpret the inter-rater reliability between Rater 2 and Rater 3 based on Landis and Koch’s (1977) interpretation guideline.
< 0.00 Poor agreement
0.00 – 0.20 Slight agreement
0.21 – 0.40 Fair agreement
0.41 – 0.60 Moderate agreement
0.61 – 0.80 Substantial agreement
0.81 – 1.00 Almost perfect agreement
Interpretation: Cohen’s kappa indicated fair agreement between Rater 2 and Rater 3 (κ = .27), according to Landis and Koch’s (1977) guideline.
Once again the inter-rater reliability is only fair: the two raters’ scores agree somewhat beyond what chance alone would produce, but the agreement falls short of moderate or substantial.
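As an aside (not required for this task), if you want a single chance-corrected agreement index across all three raters at once, the irr package provides Fleiss’ kappa:
writing_scores %>%
  select(Rater1, Rater2, Rater3) %>%
  kappam.fleiss() # one kappa for all three raters jointly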