## 1
# Prenatal self-assessment scale: responses from 500 pregnant women.
# The first 14 items measure acceptance of pregnancy; the last 10 measure
# pregnancy hardship.

# Read in the data and inspect its structure
dat<-read.table('prenatal3.txt', header=T)
str(dat)
## 'data.frame':    500 obs. of  24 variables:
##  $ A1 : int  2 3 1 3 1 1 3 3 1 4 ...
##  $ A2 : int  2 2 1 3 2 2 3 4 1 2 ...
##  $ A3 : int  2 1 4 3 3 1 2 3 1 2 ...
##  $ A4 : int  2 3 2 2 1 1 3 3 1 1 ...
##  $ A5 : int  1 1 2 1 1 2 3 1 1 2 ...
##  $ A6 : int  1 1 1 2 1 1 3 1 1 1 ...
##  $ A7 : int  3 1 2 1 3 3 4 4 2 2 ...
##  $ A8 : int  1 1 1 1 2 1 4 1 1 4 ...
##  $ A9 : int  3 1 2 4 2 1 4 1 1 2 ...
##  $ A10: int  1 1 1 1 2 1 4 1 1 1 ...
##  $ A11: int  2 1 2 1 3 2 4 2 1 2 ...
##  $ A12: int  1 1 1 1 2 1 4 3 1 1 ...
##  $ A13: int  1 1 1 2 2 1 4 3 1 2 ...
##  $ A14: int  2 1 2 1 2 2 3 2 1 2 ...
##  $ B1 : int  1 1 1 2 1 2 1 2 1 2 ...
##  $ B2 : int  3 2 3 4 4 3 4 1 1 1 ...
##  $ B3 : int  3 1 3 1 1 1 4 2 1 3 ...
##  $ B4 : int  3 2 3 3 1 1 4 2 1 4 ...
##  $ B5 : int  1 2 2 1 1 3 4 2 1 1 ...
##  $ B6 : int  3 1 3 1 1 2 4 4 1 3 ...
##  $ B7 : int  4 3 3 1 1 3 3 2 1 1 ...
##  $ B8 : int  4 2 2 1 1 2 3 2 1 1 ...
##  $ B9 : int  4 2 2 2 2 2 4 2 1 2 ...
##  $ B10: int  2 2 2 1 3 2 4 2 2 1 ...
# Proportion of missing values per item; 500 is the sample size
show(apply(apply(dat, 2, is.na), 2, sum)/500)
##  A1  A2  A3  A4  A5  A6  A7  A8  A9 A10 A11 A12 A13 A14  B1  B2  B3  B4 
##   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0 
##  B5  B6  B7  B8  B9 B10 
##   0   0   0   0   0   0
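# Equivalently, the per-item missing proportions can be computed in one step
# (a sketch; with no missing data this again returns all zeros):
colMeans(is.na(dat))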
# Distribution of the four response options for each item
show(dat4<-apply(dat, 2, table)/500)
##      A1    A2    A3    A4    A5    A6    A7    A8    A9   A10   A11   A12
## 1 0.430 0.508 0.280 0.416 0.676 0.796 0.364 0.696 0.398 0.696 0.520 0.736
## 2 0.274 0.328 0.376 0.294 0.188 0.130 0.302 0.156 0.342 0.202 0.274 0.156
## 3 0.194 0.112 0.230 0.222 0.104 0.042 0.224 0.054 0.172 0.052 0.166 0.046
## 4 0.102 0.052 0.114 0.068 0.032 0.032 0.110 0.094 0.088 0.050 0.040 0.062
##     A13  A14    B1    B2    B3    B4    B5    B6    B7    B8    B9   B10
## 1 0.486 0.59 0.734 0.272 0.362 0.180 0.502 0.486 0.288 0.362 0.352 0.436
## 2 0.316 0.26 0.208 0.270 0.418 0.352 0.310 0.344 0.346 0.372 0.414 0.288
## 3 0.110 0.12 0.044 0.266 0.188 0.330 0.154 0.140 0.286 0.208 0.188 0.200
## 4 0.088 0.03 0.014 0.192 0.032 0.138 0.034 0.030 0.080 0.058 0.046 0.076
library(reshape2)
# Reshape the option distributions from wide to long format and store as a data frame
dat4<-melt(dat4)
names(dat4)<-c('選項','題目','比率')

# Create an item index (each of the 24 items occupies four consecutive rows, one per option)
dat4$題目編號 <- rep(1:24, rep(4,24))

require(plyr)
## Loading required package: plyr
# mapvalues() recodes the values of a variable
dat4$選項 <- mapvalues(dat4$選項, from = c(1,2,3,4), to = c("一點也不","稍微如此","有些如此","的確如此"))
dat4$選項 <- ordered(dat4$選項,c("一點也不","稍微如此","有些如此","的確如此"))
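# A base-R alternative to the two steps above (a sketch, not run here, since the
# numeric codes have already been recoded): factor() can relabel and order the
# options in one call.
# dat4$選項 <- factor(dat4$選項, levels = 1:4,
#                     labels = c("一點也不", "稍微如此", "有些如此", "的確如此"),
#                     ordered = TRUE)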

# Prepare for plotting: switch the ggplot theme and save the current one
require(ggplot2)
## Loading required package: ggplot2
old <- theme_set(theme_bw())

# Subscale 1: the first 14 items
# ymin = where each segment starts, ymax = where it ends (here drawn between each proportion and the 0.25 reference line)
ggplot(data = subset(dat4, 題目編號 < 15), aes(x = reorder(題目, 比率, max), 
                                           y = 比率, ymin = 比率, ymax = 0.25, group = 題目)) +
  geom_pointrange() +
  geom_hline(yintercept = 0.25, linetype = "dotted") +
  facet_grid(. ~ 選項) +
  coord_flip() +
  labs(x = "題目", y = "選項比率", title = "懷孕接受度") 

# Subscale 2: the last 10 items
ggplot(data = subset(dat4, 題目編號 > 14 ),  aes(x = reorder(題目, 比率, max), 
                                             y = 比率, ymin = 0.25, ymax = 比率, group = 題目)) +
  geom_pointrange() +
  geom_hline(yintercept = 0.25, linetype = "dotted") +
  facet_grid(. ~ 選項) +
  coord_flip() +
  labs(x = "題目", y = "選項比率", title = "孕期辛勞") 

# Restore the previous theme
theme_set(old)

# Define a function that computes an item's mean, SD, skewness, and kurtosis at once
# (note: moments::kurtosis reports ordinary, not excess, kurtosis, so a normal
# distribution gives 3 rather than 0)
require(moments)
## Loading required package: moments
my_summary <- function(x) {
  funs <- c(mean, sd, skewness, kurtosis)
  sapply(funs, function(f) f(x, na.rm = TRUE))
}

# Compute the first four moments for all items at once and store the result
# The 2 is the MARGIN argument of apply(): 1 applies FUN over rows, 2 over columns, c(1, 2) over both
dta_desc <- apply(dat, 2, my_summary)
rownames(dta_desc) <- c("平均", "標準差", "偏態", "峰度")
# t() transposes
rslt1 <- as.data.frame(t(dta_desc))
# Round to 3 decimal places
round(rslt1,3)
##      平均 標準差  偏態  峰度
## A1  1.968  1.016 0.649 2.198
## A2  1.708  0.863 1.087 3.403
## A3  2.178  0.968 0.396 2.178
## A4  1.942  0.953 0.588 2.221
## A5  1.492  0.807 1.537 4.394
## A6  1.310  0.701 2.463 8.562
## A7  2.080  1.012 0.479 2.061
## A8  1.546  0.960 1.651 4.395
## A9  1.950  0.960 0.699 2.469
## A10 1.456  0.808 1.863 5.733
## A11 1.726  0.879 0.916 2.781
## A12 1.434  0.843 1.994 5.970
## A13 1.800  0.954 1.018 3.019
## A14 1.590  0.814 1.213 3.573
## B1  1.338  0.630 2.001 6.948
## B2  2.378  1.080 0.125 1.741
## B3  1.890  0.817 0.558 2.586
## B4  2.426  0.939 0.062 2.114
## B5  1.720  0.846 0.905 2.885
## B6  1.714  0.816 0.900 3.017
## B7  2.158  0.933 0.274 2.096
## B8  1.962  0.896 0.560 2.422
## B9  1.928  0.849 0.590 2.627
## B10 1.916  0.967 0.674 2.317
# Prepare for plotting: reshape to long format
dtal_desc <- melt(dta_desc)
names(dtal_desc)[1:2] <- c("動差", "題目")
head(dtal_desc)
##     動差 題目     value
## 1   平均   A1 1.9680000
## 2 標準差   A1 1.0163868
## 3   偏態   A1 0.6486360
## 4   峰度   A1 2.1982803
## 5   平均   A2 1.7080000
## 6 標準差   A2 0.8626844
# Switch the theme and save the old one
old <- theme_set(theme_minimal())

# Plot the mean of every item
# Dashed reference lines mark the mean of the item means and ±1.5 SD around it
ggplot(data = subset(dtal_desc, 動差 == "平均"), 
       aes(x = reorder(題目, value, max), y = value, group = 動差)) +
  geom_point(size = 3)+
  geom_hline(yintercept = mean(t(dta_desc["平均",])) + 
               c(-1.5, 0, 1.5) * sd(t(dta_desc["平均",])), linetype = "dashed") +
  coord_flip() +
  labs(x = "題目", y = "平均") 

# Plot the standard deviation of every item
# Figure 6.2
ggplot(data = subset(dtal_desc, 動差 == "標準差"), 
       aes(x = reorder(題目, value, max), y = value, group = 動差)) +
  geom_point(size = 3)+
  geom_hline(yintercept = mean(t(dta_desc["標準差",])) + 
               c(-1.5, 0, 1.5) * sd(t(dta_desc["標準差",])), linetype = "dashed") +
  coord_flip() +
  labs(x = "題目", y = "標準差") 

# Plot the skewness of every item
ggplot(data = subset(dtal_desc, 動差 == "偏態"), 
       aes(x = reorder(題目, value, max), y = value, group = 動差)) +
  geom_point(size = 3)+
  geom_hline(yintercept = mean(t(dta_desc["偏態",])) + 
               c(-1.5, 0, 1.5) * sd(t(dta_desc["偏態",])), linetype = "dashed") +
  coord_flip() +
  labs(x = "題目", y = "偏態") 

# Plot the kurtosis of every item
ggplot(data = subset(dtal_desc, 動差 == "峰度"), 
       aes(x = reorder(題目, value, max), y = value, group = 動差)) +
  geom_point(size = 3)+
  geom_hline(yintercept = mean(t(dta_desc["峰度",])) + 
               c(-1.5, 0, 1.5) * sd(t(dta_desc["峰度",])), linetype = "dashed") +
  coord_flip() +
  labs(x = "題目", y = "峰度") 

# Restore the previous theme
theme_set(old)


# Item discrimination: using the total score, take the bottom and top 27% as the
# low- and high-scoring groups and compare the two groups on each item
dat$tot <- apply(dat, 1, sum)
dat$grp <- NA
dat$grp[rank(dat$tot) < 500*.27] <- "L"
dat$grp[rank(dat$tot) > 500*.73] <- "H"
dat$grp <- factor(dat$grp)
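# A quantile-based way to form the same extreme groups (a sketch; with tied
# total scores the group sizes can differ slightly from the rank-based cut):
cuts <- quantile(dat$tot, c(.27, .73))
grp_q <- factor(ifelse(dat$tot <= cuts[1], "L",
                       ifelse(dat$tot >= cuts[2], "H", NA)))
table(grp_q, dat$grp, useNA = "ifany")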

# Compute the high- and low-group means
# Group by grp (rows) and compute the mean of each of the 24 items (columns)
dtam <- aggregate(dat[,1:24], by=list(dat$grp), mean)

# Drop the first column (the group label); it is no longer needed
dtam <- t(dtam[,-1])

# t-test comparing the high and low groups on each item
item_t <- sapply(dat[,1:24], function(x) t.test(x ~ dat$grp)$statistic)

# Store the results in a new data frame, rslt2
rslt2 <- data.frame(Item=rownames(dtam),m.l=dtam[,2], m.h=dtam[,1], m.dif=dtam[,1]-dtam[,2], t.stat=item_t)

# Plot the t statistics
# Figure 6.3
ggplot(data = rslt2, aes(x=reorder(Item, t.stat, max), y=t.stat)) +
  geom_point() +
  geom_hline(yintercept = 2, linetype="dashed") +
  coord_flip() +
  labs(x = "題目", y = "t檢定值") +
  theme_bw()

# Tidy up: rename the columns and round to 3 decimal places
rslt2 <- rslt2[,-1]
names(rslt2) <- c('低分組平均','高分組平均','差異','t檢定')
round(rslt2,3)
##     低分組平均 高分組平均  差異  t檢定
## A1       1.444      2.754 1.310 12.682
## A2       1.239      2.385 1.145 12.238
## A3       1.711      2.585 0.873  7.760
## A4       1.204      2.946 1.742 22.711
## A5       1.049      2.346 1.297 14.903
## A6       1.007      1.746 0.739  9.598
## A7       1.430      2.800 1.370 13.032
## A8       1.049      2.415 1.366 13.444
## A9       1.479      2.385 0.906  8.251
## A10      1.085      2.108 1.023 10.606
## A11      1.077      2.592 1.515 19.152
## A12      1.042      2.192 1.150 12.000
## A13      1.155      2.523 1.368 14.245
## A14      1.056      2.469 1.413 17.881
## B1       1.127      1.623 0.496  6.231
## B2       2.183      2.392 0.209  1.559
## B3       1.310      2.538 1.229 15.139
## B4       1.789      2.938 1.150 11.899
## B5       1.218      2.392 1.174 13.037
## B6       1.162      2.431 1.269 14.954
## B7       1.542      2.708 1.165 11.971
## B8       1.289      2.600 1.311 14.780
## B9       1.338      2.508 1.170 13.262
## B10      1.204      2.700 1.496 16.256
# Use the psychometric package to compute item-total correlations
require(psychometric)
## Loading required package: psychometric
## Warning: package 'psychometric' was built under R version 3.4.3
## Loading required package: multilevel
## Warning: package 'multilevel' was built under R version 3.4.3
## Loading required package: nlme
## Loading required package: MASS
## 
## Attaching package: 'psychometric'
## The following object is masked from 'package:ggplot2':
## 
##     alpha
itotr <- item.exam(dat[, 1:24], discrim = TRUE)
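# Cross-check (a sketch): Item.Tot.woi should be the corrected item-total
# correlation, i.e. each item correlated with the total score computed without
# that item. For A1 this can be verified directly:
cor(dat$A1, rowSums(dat[, 1:24]) - dat$A1)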

# Split the items into a list of two data frames, one per subscale
ldta <- list(x = dat[, 1:14], y = dat[,15:24])

# Compute the correlation of each item with its subscale total
isubr <- lapply(ldta, item.exam, discrim = TRUE)

# Collect the item-total and item-subscale correlations into one object
rslt3 <- t(rbind(itotr$Item.Tot.woi,
                 c(isubr$x$Item.Tot.woi, isubr$y$Item.Tot.woi)))

# Display the results
rslt3 <- data.frame(rslt3)
names(rslt3) <- c('題目總分相關','題目分量表相關')
row.names(rslt3) <- names(dat[,1:24])
round(rslt3, 3)
##     題目總分相關 題目分量表相關
## A1         0.481          0.533
## A2         0.493          0.477
## A3         0.301          0.283
## A4         0.692          0.679
## A5         0.646          0.687
## A6         0.395          0.473
## A7         0.478          0.449
## A8         0.543          0.627
## A9         0.307          0.360
## A10        0.505          0.558
## A11        0.656          0.614
## A12        0.553          0.660
## A13        0.542          0.600
## A14        0.667          0.669
## B1         0.244          0.224
## B2         0.011         -0.031
## B3         0.567          0.645
## B4         0.412          0.600
## B5         0.521          0.501
## B6         0.580          0.566
## B7         0.417          0.569
## B8         0.504          0.629
## B9         0.500          0.628
## B10        0.547          0.506
# Item reliability indices from the subscale analyses
isubrel <- c(isubr$x$Item.Rel.woi, isubr$y$Item.Rel.woi)

# Detach the psychometric package
detach("package:psychometric", unload = TRUE)

# Load psych to obtain Cronbach's alpha for the full scale and the subscales
require(psych)
## Loading required package: psych
## 
## Attaching package: 'psych'
## The following objects are masked from 'package:ggplot2':
## 
##     %+%, alpha
# Full-scale alpha when each item is dropped
itotalpha <- alpha(dat[, 1:24])$alpha.drop[,'raw_alpha']
## Warning in alpha(dat[, 1:24]): Some items were negatively correlated with the total scale and probably 
## should be reversed.  
## To do this, run the function again with the 'check.keys=TRUE' option
## Some items ( B2 ) were negatively correlated with the total scale and 
## probably should be reversed.  
## To do this, run the function again with the 'check.keys=TRUE' option
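# For reference, raw Cronbach's alpha follows directly from its definition,
# alpha = k/(k - 1) * (1 - sum of item variances / variance of the total score).
# A minimal sketch, which should agree with the raw_alpha reported by psych::alpha:
raw_alpha <- function(x) {
  k <- ncol(x)
  (k / (k - 1)) * (1 - sum(apply(x, 2, var)) / var(rowSums(x)))
}
raw_alpha(dat[, 1:24])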
# Subscale alpha when each item is dropped
isubalpha <- lapply(ldta, alpha)
## Warning in FUN(X[[i]], ...): Some items were negatively correlated with the total scale and probably 
## should be reversed.  
## To do this, run the function again with the 'check.keys=TRUE' option
## Some items ( B2 ) were negatively correlated with the total scale and 
## probably should be reversed.  
## To do this, run the function again with the 'check.keys=TRUE' option
ialphad <- c(isubalpha$x$alpha.drop[,'raw_alpha'],
             isubalpha$y$alpha.drop[,'raw_alpha'])

# Combine the results
rslt4 <- as.data.frame(t(rbind(itotalpha, ialphad, isubrel)))
names(rslt4) <- c('總量表信度(刪題)', '分量表信度(刪題)', '題目信度')

# Add item names and display 3 decimal places
# Output 6.5
row.names(rslt4) <- names(dat[,1:24])
round(rslt4, 3)
##     總量表信度(刪題) 分量表信度(刪題) 題目信度
## A1               0.886              0.869    0.541
## A2               0.886              0.871    0.411
## A3               0.891              0.881    0.274
## A4               0.880              0.861    0.647
## A5               0.882              0.862    0.554
## A6               0.888              0.871    0.331
## A7               0.886              0.873    0.454
## A8               0.884              0.863    0.602
## A9               0.891              0.878    0.345
## A10              0.886              0.867    0.451
## A11              0.882              0.864    0.539
## A12              0.884              0.862    0.556
## A13              0.885              0.865    0.572
## A14              0.882              0.862    0.544
## B1               0.891              0.800    0.141
## B2               0.900              0.845   -0.033
## B3               0.884              0.758    0.527
## B4               0.888              0.761    0.563
## B5               0.885              0.774    0.424
## B6               0.884              0.767    0.461
## B7               0.888              0.765    0.530
## B8               0.886              0.758    0.563
## B9               0.886              0.759    0.533
## B10              0.884              0.773    0.489
# Factor analysis
# Based on the scale's design, the number of factors is set to 2
# Output 6.6
require(psych)
print.psych(fa(dat[, 1:24], nfactor = 2, fm = "pa", rotate = "promax"), cut = .3)
## Loading required namespace: GPArotation
## Factor Analysis using method =  pa
## Call: fa(r = dat[, 1:24], nfactors = 2, rotate = "promax", fm = "pa")
## Standardized loadings (pattern matrix) based upon correlation matrix
##       PA1   PA2     h2   u2 com
## A1   0.58       0.3576 0.64 1.0
## A2   0.33       0.2640 0.74 1.9
## A3              0.0831 0.92 1.8
## A4   0.54  0.33 0.5662 0.43 1.7
## A5   0.70       0.5782 0.42 1.1
## A6   0.63       0.3291 0.67 1.1
## A7         0.35 0.2700 0.73 1.8
## A8   0.78       0.5331 0.47 1.0
## A9   0.42       0.1481 0.85 1.1
## A10  0.60       0.3583 0.64 1.0
## A11  0.46  0.37 0.5103 0.49 1.9
## A12  0.83       0.5919 0.41 1.1
## A13  0.60       0.3832 0.62 1.0
## A14  0.56       0.5458 0.45 1.5
## B1              0.0819 0.92 1.2
## B2              0.0096 0.99 2.0
## B3         0.73 0.5279 0.47 1.0
## B4         0.73 0.4373 0.56 1.1
## B5         0.49 0.3547 0.65 1.3
## B6         0.57 0.4401 0.56 1.2
## B7         0.71 0.4229 0.58 1.1
## B8         0.74 0.4902 0.51 1.0
## B9         0.71 0.4607 0.54 1.0
## B10        0.50 0.3828 0.62 1.3
## 
##                        PA1  PA2
## SS loadings           4.81 4.32
## Proportion Var        0.20 0.18
## Cumulative Var        0.20 0.38
## Proportion Explained  0.53 0.47
## Cumulative Proportion 0.53 1.00
## 
##  With factor correlations of 
##      PA1  PA2
## PA1 1.00 0.46
## PA2 0.46 1.00
## 
## Mean item complexity =  1.3
## Test of the hypothesis that 2 factors are sufficient.
## 
## The degrees of freedom for the null model are  276  and the objective function was  9.86 with Chi Square of  4832.6
## The degrees of freedom for the model are 229  and the objective function was  1.85 
## 
## The root mean square of the residuals (RMSR) is  0.06 
## The df corrected root mean square of the residuals is  0.06 
## 
## The harmonic number of observations is  500 with the empirical chi square  890.78  with prob <  9e-79 
## The total number of observations was  500  with Likelihood Chi Square =  905.57  with prob <  3.6e-81 
## 
## Tucker Lewis Index of factoring reliability =  0.821
## RMSEA index =  0.078  and the 90 % confidence intervals are  0.072 0.082
## BIC =  -517.58
## Fit based upon off diagonal values = 0.97
## Measures of factor score adequacy             
##                                                    PA1  PA2
## Correlation of (regression) scores with factors   0.95 0.94
## Multiple R square of scores with factors          0.90 0.89
## Minimum correlation of possible factor scores     0.80 0.77
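# To work with the two-factor solution further (a sketch), the fitted object can
# be stored and its pattern loadings and factor correlations inspected directly:
fa2 <- fa(dat[, 1:24], nfactors = 2, fm = "pa", rotate = "promax")
round(unclass(fa2$loadings), 2)  # pattern matrix
round(fa2$Phi, 2)                # factor correlation matrix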
# Parallel analysis to explore the number of factors/components
# Figure 6.4
fa.parallel(dat[, 1:24], fa = "pc", show.legend = FALSE)

## Parallel analysis suggests that the number of factors =  NA  and the number of components =  3
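# fa = "pc" above requests principal components only; to also get a suggested
# number of common factors, fa = "both" could be requested (a sketch, not run):
# fa.parallel(dat[, 1:24], fa = "both", show.legend = FALSE)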
# Refit with 3 factors and examine the factor structure
# Output 6.7
print.psych(fa(dat[, 1:24], nfactor = 3, fm = "pa", rotate = "promax"), cut = .3)
## Factor Analysis using method =  pa
## Call: fa(r = dat[, 1:24], nfactors = 3, rotate = "promax", fm = "pa")
## Standardized loadings (pattern matrix) based upon correlation matrix
##       PA1   PA2   PA3    h2   u2 com
## A1   0.58             0.384 0.62 1.4
## A2   0.38             0.266 0.73 1.4
## A3               0.30 0.144 0.86 1.9
## A4   0.73             0.627 0.37 1.0
## A5   0.70             0.608 0.39 1.3
## A6               0.56 0.378 0.62 1.3
## A7   0.39             0.277 0.72 1.4
## A8   0.37        0.56 0.546 0.45 1.9
## A9               0.62 0.346 0.65 1.2
## A10              0.60 0.461 0.54 1.1
## A11  0.81             0.648 0.35 1.0
## A12  0.41        0.58 0.598 0.40 2.0
## A13              0.51 0.430 0.57 1.4
## A14  0.80             0.650 0.35 1.0
## B1   0.37             0.132 0.87 1.4
## B2                    0.051 0.95 1.9
## B3         0.66       0.545 0.45 1.1
## B4         0.75       0.501 0.50 1.0
## B5   0.41  0.30       0.364 0.64 1.9
## B6   0.31  0.43       0.436 0.56 1.9
## B7         0.74       0.490 0.51 1.0
## B8         0.69       0.518 0.48 1.0
## B9         0.65       0.474 0.53 1.0
## B10  0.46             0.402 0.60 1.7
## 
##                        PA1  PA2  PA3
## SS loadings           4.57 3.12 2.59
## Proportion Var        0.19 0.13 0.11
## Cumulative Var        0.19 0.32 0.43
## Proportion Explained  0.44 0.30 0.25
## Cumulative Proportion 0.44 0.75 1.00
## 
##  With factor correlations of 
##      PA1  PA2  PA3
## PA1 1.00 0.50 0.39
## PA2 0.50 1.00 0.22
## PA3 0.39 0.22 1.00
## 
## Mean item complexity =  1.4
## Test of the hypothesis that 3 factors are sufficient.
## 
## The degrees of freedom for the null model are  276  and the objective function was  9.86 with Chi Square of  4832.6
## The degrees of freedom for the model are 207  and the objective function was  1.07 
## 
## The root mean square of the residuals (RMSR) is  0.04 
## The df corrected root mean square of the residuals is  0.04 
## 
## The harmonic number of observations is  500 with the empirical chi square  373.94  with prob <  1e-11 
## The total number of observations was  500  with Likelihood Chi Square =  520.01  with prob <  7e-29 
## 
## Tucker Lewis Index of factoring reliability =  0.908
## RMSEA index =  0.056  and the 90 % confidence intervals are  0.049 0.061
## BIC =  -766.41
## Fit based upon off diagonal values = 0.99
## Measures of factor score adequacy             
##                                                    PA1  PA2  PA3
## Correlation of (regression) scores with factors   0.95 0.93 0.90
## Multiple R square of scores with factors          0.90 0.86 0.80
## Minimum correlation of possible factor scores     0.81 0.72 0.61
# Define three subscales (item sets)
ldta2 <- list(x = dat[c(4,5,11,14)] , y=dat[c(17,18,19,20)] , z=dat[c(6,9,10,13)])
# Subscale reliabilities
lapply(ldta2, alpha)
## $x
## 
## Reliability analysis   
## Call: FUN(x = X[[i]])
## 
##   raw_alpha std.alpha G6(smc) average_r S/N    ase mean   sd
##       0.87      0.87    0.83      0.62 6.5 0.0098  1.7 0.73
## 
##  lower alpha upper     95% confidence boundaries
## 0.85 0.87 0.88 
## 
##  Reliability if an item is dropped:
##     raw_alpha std.alpha G6(smc) average_r S/N alpha se
## A4       0.82      0.82    0.76      0.60 4.6    0.014
## A5       0.85      0.86    0.80      0.66 5.9    0.011
## A11      0.83      0.83    0.77      0.62 4.8    0.013
## A14      0.81      0.81    0.74      0.59 4.3    0.014
## 
##  Item statistics 
##       n raw.r std.r r.cor r.drop mean   sd
## A4  500  0.87  0.86  0.79   0.74  1.9 0.95
## A5  500  0.80  0.80  0.70   0.65  1.5 0.81
## A11 500  0.85  0.85  0.78   0.72  1.7 0.88
## A14 500  0.87  0.87  0.82   0.76  1.6 0.81
## 
## Non missing response frequency for each item
##        1    2    3    4 miss
## A4  0.42 0.29 0.22 0.07    0
## A5  0.68 0.19 0.10 0.03    0
## A11 0.52 0.27 0.17 0.04    0
## A14 0.59 0.26 0.12 0.03    0
## 
## $y
## 
## Reliability analysis   
## Call: FUN(x = X[[i]])
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean   sd
##       0.76      0.77    0.72      0.45 3.3 0.017  1.9 0.66
## 
##  lower alpha upper     95% confidence boundaries
## 0.73 0.76 0.8 
## 
##  Reliability if an item is dropped:
##    raw_alpha std.alpha G6(smc) average_r S/N alpha se
## B3      0.67      0.68    0.58      0.41 2.1    0.025
## B4      0.72      0.72    0.63      0.46 2.5    0.022
## B5      0.74      0.74    0.67      0.49 2.8    0.021
## B6      0.71      0.71    0.63      0.45 2.5    0.022
## 
##  Item statistics 
##      n raw.r std.r r.cor r.drop mean   sd
## B3 500  0.80  0.81  0.73   0.64  1.9 0.82
## B4 500  0.78  0.76  0.65   0.56  2.4 0.94
## B5 500  0.73  0.73  0.58   0.51  1.7 0.85
## B6 500  0.76  0.77  0.65   0.56  1.7 0.82
## 
## Non missing response frequency for each item
##       1    2    3    4 miss
## B3 0.36 0.42 0.19 0.03    0
## B4 0.18 0.35 0.33 0.14    0
## B5 0.50 0.31 0.15 0.03    0
## B6 0.49 0.34 0.14 0.03    0
## 
## $z
## 
## Reliability analysis   
## Call: FUN(x = X[[i]])
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean   sd
##       0.71      0.72    0.66      0.39 2.5 0.021  1.6 0.63
## 
##  lower alpha upper     95% confidence boundaries
## 0.67 0.71 0.75 
## 
##  Reliability if an item is dropped:
##     raw_alpha std.alpha G6(smc) average_r S/N alpha se
## A6       0.68      0.68    0.59      0.42 2.2    0.025
## A9       0.68      0.69    0.60      0.43 2.2    0.024
## A10      0.60      0.60    0.51      0.34 1.5    0.030
## A13      0.63      0.64    0.56      0.37 1.8    0.028
## 
##  Item statistics 
##       n raw.r std.r r.cor r.drop mean   sd
## A6  500  0.66  0.71  0.55   0.45  1.3 0.70
## A9  500  0.73  0.70  0.53   0.45  1.9 0.96
## A10 500  0.78  0.79  0.70   0.59  1.5 0.81
## A13 500  0.77  0.75  0.62   0.53  1.8 0.95
## 
## Non missing response frequency for each item
##        1    2    3    4 miss
## A6  0.80 0.13 0.04 0.03    0
## A9  0.40 0.34 0.17 0.09    0
## A10 0.70 0.20 0.05 0.05    0
## A13 0.49 0.32 0.11 0.09    0
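# Subscale scores for the three item sets can then be formed as row sums
# (a sketch), e.g. to see how strongly the subscales correlate with each other:
subscores <- sapply(ldta2, rowSums)
round(cor(subscores), 2)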
# Full-scale reliability (omega)
# Output 6.10
require(psych)
# Re-read the data so dat again contains only the 24 items (dropping tot and grp)
dat<-read.table('prenatal3.txt', header=T)
omega(dat, nfactor = 3)
## Warning in fac(r = r, nfactors = nfactors, n.obs = n.obs, rotate =
## rotate, : A loading greater than abs(1) was detected. Examine the loadings
## carefully.
## The estimated weights for the factor scores are probably incorrect.  Try a different factor extraction method.
## Warning in fac(r = r, nfactors = nfactors, n.obs = n.obs, rotate =
## rotate, : An ultra-Heywood case was detected. Examine the results carefully
## Warning in cov2cor(t(w) %*% r %*% w): diag(.) had 0 or NA entries; non-
## finite result is doubtful

## Omega 
## Call: omega(m = dat, nfactors = 3)
## Alpha:                 0.89 
## G.6:                   0.92 
## Omega Hierarchical:    0.69 
## Omega H asymptotic:    0.75 
## Omega Total            0.92 
## 
## Schmid Leiman Factor loadings greater than  0.2 
##         g   F1*   F2*   F3*   h2   u2   p2
## A1   0.58                   0.38 0.62 0.89
## A2   0.48                   0.27 0.73 0.87
## A3               0.22  0.28 0.14 0.86 0.15
## A4   0.78                   0.63 0.37 0.98
## A5   0.74              0.23 0.61 0.39 0.91
## A6   0.33              0.52 0.38 0.62 0.28
## A7   0.49                   0.28 0.72 0.86
## A8   0.51              0.52 0.55 0.45 0.48
## A9                     0.57 0.35 0.65 0.03
## A10  0.37              0.56 0.46 0.54 0.30
## A11  0.80                   0.65 0.35 0.99
## A12  0.54              0.54 0.60 0.40 0.49
## A13  0.44              0.48 0.43 0.57 0.45
## A14  0.81                   0.65 0.35 1.00
## B1   0.33                   0.13 0.87 0.82
## B2-                   -0.21 0.05 0.95 0.13
## B3   0.42        0.60       0.55 0.45 0.33
## B4   0.23        0.67       0.50 0.50 0.10
## B5   0.52        0.29       0.36 0.64 0.75
## B6   0.51        0.41       0.44 0.56 0.60
## B7   0.23        0.66       0.49 0.51 0.11
## B8   0.35        0.63       0.52 0.48 0.23
## B9   0.35        0.59       0.47 0.53 0.26
## B10  0.56        0.29       0.40 0.60 0.78
## 
## With eigenvalues of:
##   g F1* F2* F3* 
## 5.8 0.0 2.5 2.0 
## 
## general/max  2.3   max/min =   Inf
## mean percent general =  0.53    with sd =  0.33 and cv of  0.63 
## Explained Common Variance of the general factor =  0.56 
## 
## The degrees of freedom are 207  and the fit is  1.07 
## The number of observations was  500  with Chi Square =  520.01  with prob <  7e-29
## The root mean square of the residuals is  0.04 
## The df corrected root mean square of the residuals is  0.04
## RMSEA index =  0.056  and the 10 % confidence intervals are  0.049 0.061
## BIC =  -766.42
## 
## Compare this with the adequacy of just a general factor and no group factors
## The degrees of freedom for just the general factor are 252  and the fit is  3.87 
## The number of observations was  500  with Chi Square =  1892.57  with prob <  6.6e-249
## The root mean square of the residuals is  0.13 
## The df corrected root mean square of the residuals is  0.14 
## 
## RMSEA index =  0.115  and the 10 % confidence intervals are  0.109 0.119
## BIC =  326.49 
## 
## Measures of factor score adequacy             
##                                                  g F1*  F2*  F3*
## Correlation of scores with factors            0.95   0 0.90 0.86
## Multiple R square of scores with factors      0.90   0 0.81 0.74
## Minimum correlation of factor score estimates 0.79  -1 0.61 0.48
## 
##  Total, General and Subset omega for each subset
##                                                  g F1*  F2*  F3*
## Omega total for total scores and subscales    0.92  NA 0.89 0.84
## Omega general for total scores and subscales  0.69  NA 0.54 0.56
## Omega group for total scores and subscales    0.21  NA 0.35 0.28
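# The omega output can also be stored to extract the individual coefficients
# (a sketch, assuming the usual component names of the psych omega object):
om <- omega(dat, nfactors = 3)
om$omega_h    # omega hierarchical
om$omega.tot  # omega total
om$alpha      # Cronbach's alpha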