library(stats)
crime <- read.table("uscrime.txt", header = T)
head(crime)
## M So Ed Po1 Po2 LF M.F Pop NW U1 U2 Wealth Ineq Prob
## 1 15.1 1 9.1 5.8 5.6 0.510 95.0 33 30.1 0.108 4.1 3940 26.1 0.084602
## 2 14.3 0 11.3 10.3 9.5 0.583 101.2 13 10.2 0.096 3.6 5570 19.4 0.029599
## 3 14.2 1 8.9 4.5 4.4 0.533 96.9 18 21.9 0.094 3.3 3180 25.0 0.083401
## 4 13.6 0 12.1 14.9 14.1 0.577 99.4 157 8.0 0.102 3.9 6730 16.7 0.015801
## 5 14.1 0 12.1 10.9 10.1 0.591 98.5 18 3.0 0.091 2.0 5780 17.4 0.041399
## 6 12.1 0 11.0 11.8 11.5 0.547 96.4 25 4.4 0.084 2.9 6890 12.6 0.034201
## Time Crime
## 1 26.2011 791
## 2 25.2999 1635
## 3 24.3006 578
## 4 29.9012 1969
## 5 21.2998 1234
## 6 20.9995 682
set.seed(1)
Using the prcomp function from the stats library, we can run PCA on the predictors of our crime dataset. Based on the eigenvalues of our PCA object, we can use an elbow plot to determine how many principal components to use in our analysis.
crime.pca <- prcomp(crime[,1:15], center = T, scale = T)
crime.pca
## Standard deviations (1, .., p=15):
## [1] 2.45335539 1.67387187 1.41596057 1.07805742 0.97892746 0.74377006
## [7] 0.56729065 0.55443780 0.48492813 0.44708045 0.41914843 0.35803646
## [13] 0.26332811 0.24180109 0.06792764
##
## Rotation (n x k) = (15 x 15):
## PC1 PC2 PC3 PC4 PC5
## M -0.30371194 0.06280357 0.1724199946 -0.02035537 -0.35832737
## So -0.33088129 -0.15837219 0.0155433104 0.29247181 -0.12061130
## Ed 0.33962148 0.21461152 0.0677396249 0.07974375 -0.02442839
## Po1 0.30863412 -0.26981761 0.0506458161 0.33325059 -0.23527680
## Po2 0.31099285 -0.26396300 0.0530651173 0.35192809 -0.20473383
## LF 0.17617757 0.31943042 0.2715301768 -0.14326529 -0.39407588
## M.F 0.11638221 0.39434428 -0.2031621598 0.01048029 -0.57877443
## Pop 0.11307836 -0.46723456 0.0770210971 -0.03210513 -0.08317034
## NW -0.29358647 -0.22801119 0.0788156621 0.23925971 -0.36079387
## U1 0.04050137 0.00807439 -0.6590290980 -0.18279096 -0.13136873
## U2 0.01812228 -0.27971336 -0.5785006293 -0.06889312 -0.13499487
## Wealth 0.37970331 -0.07718862 0.0100647664 0.11781752 0.01167683
## Ineq -0.36579778 -0.02752240 -0.0002944563 -0.08066612 -0.21672823
## Prob -0.25888661 0.15831708 -0.1176726436 0.49303389 0.16562829
## Time -0.02062867 -0.38014836 0.2235664632 -0.54059002 -0.14764767
## PC6 PC7 PC8 PC9 PC10 PC11
## M -0.449132706 -0.15707378 -0.55367691 0.15474793 -0.01443093 0.39446657
## So -0.100500743 0.19649727 0.22734157 -0.65599872 0.06141452 0.23397868
## Ed -0.008571367 -0.23943629 -0.14644678 -0.44326978 0.51887452 -0.11821954
## Po1 -0.095776709 0.08011735 0.04613156 0.19425472 -0.14320978 -0.13042001
## Po2 -0.119524780 0.09518288 0.03168720 0.19512072 -0.05929780 -0.13885912
## LF 0.504234275 -0.15931612 0.25513777 0.14393498 0.03077073 0.38532827
## M.F -0.074501901 0.15548197 -0.05507254 -0.24378252 -0.35323357 -0.28029732
## Pop 0.547098563 0.09046187 -0.59078221 -0.20244830 -0.03970718 0.05849643
## NW 0.051219538 -0.31154195 0.20432828 0.18984178 0.49201966 -0.20695666
## U1 0.017385981 -0.17354115 -0.20206312 0.02069349 0.22765278 -0.17857891
## U2 0.048155286 -0.07526787 0.24369650 0.05576010 -0.04750100 0.47021842
## Wealth -0.154683104 -0.14859424 0.08630649 -0.23196695 -0.11219383 0.31955631
## Ineq 0.272027031 0.37483032 0.07184018 -0.02494384 -0.01390576 -0.18278697
## Prob 0.283535996 -0.56159383 -0.08598908 -0.05306898 -0.42530006 -0.08978385
## Time -0.148203050 -0.44199877 0.19507812 -0.23551363 -0.29264326 -0.26363121
## PC12 PC13 PC14 PC15
## M 0.16580189 -0.05142365 0.04901705 0.0051398012
## So -0.05753357 -0.29368483 -0.29364512 0.0084369230
## Ed 0.47786536 0.19441949 0.03964277 -0.0280052040
## Po1 0.22611207 -0.18592255 -0.09490151 -0.6894155129
## Po2 0.19088461 -0.13454940 -0.08259642 0.7200270100
## LF 0.02705134 -0.27742957 -0.15385625 0.0336823193
## M.F -0.23925913 0.31624667 -0.04125321 0.0097922075
## Pop -0.18350385 0.12651689 -0.05326383 0.0001496323
## NW -0.36671707 0.22901695 0.13227774 -0.0370783671
## U1 -0.09314897 -0.59039450 -0.02335942 0.0111359325
## U2 0.28440496 0.43292853 -0.03985736 0.0073618948
## Wealth -0.32172821 -0.14077972 0.70031840 -0.0025685109
## Ineq 0.43762828 -0.12181090 0.59279037 0.0177570357
## Prob 0.15567100 -0.03547596 0.04761011 0.0293376260
## Time 0.13536989 -0.05738113 -0.04488401 0.0376754405
summary(crime.pca)
## Importance of components:
## PC1 PC2 PC3 PC4 PC5 PC6 PC7
## Standard deviation 2.4534 1.6739 1.4160 1.07806 0.97893 0.74377 0.56729
## Proportion of Variance 0.4013 0.1868 0.1337 0.07748 0.06389 0.03688 0.02145
## Cumulative Proportion 0.4013 0.5880 0.7217 0.79920 0.86308 0.89996 0.92142
## PC8 PC9 PC10 PC11 PC12 PC13 PC14
## Standard deviation 0.55444 0.48493 0.44708 0.41915 0.35804 0.26333 0.2418
## Proportion of Variance 0.02049 0.01568 0.01333 0.01171 0.00855 0.00462 0.0039
## Cumulative Proportion 0.94191 0.95759 0.97091 0.98263 0.99117 0.99579 0.9997
## PC15
## Standard deviation 0.06793
## Proportion of Variance 0.00031
## Cumulative Proportion 1.00000
screeplot(crime.pca, type = "lines", col = "blue")
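As a small numeric check (a sketch, not part of the original output), the same variance proportions the screeplot visualizes can be computed directly from the component standard deviations:
pve <- crime.pca$sdev^2 / sum(crime.pca$sdev^2) # Proportion of variance explained per component
cumsum(pve) # Should match the "Cumulative Proportion" row in the summary above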
From our elbow plot, we can see that after 4 principal components there is little additional advantage to using more. So we'll build a regression model with the first 4 principal components.
pc4 <- crime.pca$x[,1:4] # Use the first 4 columns of our principal components
PCAcrime <- as.data.frame(cbind(pc4, crime[,16])) # Combine our PCs with the response from our data set to create a data frame for linear regression
PCAcrime
## PC1 PC2 PC3 PC4 V5
## 1 -4.1992835 -1.09383120 -1.11907395 0.67178115 791
## 2 1.1726630 0.67701360 -0.05244634 -0.08350709 1635
## 3 -4.1737248 0.27677501 -0.37107658 0.37793995 578
## 4 3.8349617 -2.57690596 0.22793998 0.38262331 1969
## 5 1.8392999 1.33098564 1.27882805 0.71814305 1234
## 6 2.9072336 -0.33054213 0.53288181 1.22140635 682
## 7 0.2457752 -0.07362562 -0.90742064 1.13685873 963
## 8 -0.1301330 -1.35985577 0.59753132 1.44045387 1555
## 9 -3.6103169 -0.68621008 1.28372246 0.55171150 856
## 10 1.1672376 3.03207033 0.37984502 -0.28887026 705
## 11 2.5384879 -2.66771358 1.54424656 -0.87671210 1674
## 12 1.0065920 -0.06044849 1.18861346 -1.31261964 849
## 13 0.5161143 0.97485189 1.83351610 -1.59117618 511
## 14 0.4265556 1.85044812 1.02893477 -0.07789173 664
## 15 -3.3435299 0.05182823 -1.01358113 0.08840211 798
## 16 -3.0310689 -2.10295524 -1.82993161 0.52347187 946
## 17 -0.2262961 1.44939774 -1.37565975 0.28960865 539
## 18 -0.1127499 -0.39407030 -0.38836278 3.97985093 929
## 19 2.9195668 -1.58646124 0.97612613 0.78629766 750
## 20 2.2998485 -1.73396487 -2.82423222 -0.23281758 1225
## 21 1.1501667 0.13531015 0.28506743 -2.19770548 742
## 22 -5.6594827 -1.09730404 0.10043541 -0.05245484 439
## 23 -0.1011749 -0.57911362 0.71128354 -0.44394773 1216
## 24 1.3836281 1.95052341 -2.98485490 -0.35942784 968
## 25 0.2727756 2.63013778 1.83189535 0.05207518 523
## 26 4.0565577 1.17534729 -0.81690756 1.66990720 1993
## 27 0.8929694 0.79236692 1.26822542 -0.57575615 342
## 28 0.1514495 1.44873320 0.10857670 -0.51040146 1216
## 29 3.5592481 -4.76202163 0.75080576 0.64692974 1043
## 30 -4.1184576 -0.38073981 1.43463965 0.63330834 696
## 31 -0.6811731 1.66926027 -2.88645794 -1.30977099 373
## 32 1.7157269 -1.30836339 -0.55971313 -0.70557980 754
## 33 -1.8860627 0.59058174 1.43570145 0.18239089 1072
## 34 1.9526349 0.52395429 -0.75642216 0.44289927 923
## 35 1.5888864 -3.12998571 -1.73107199 -1.68604766 653
## 36 1.0709414 -1.65628271 0.79436888 -1.85172698 1272
## 37 -4.1101715 0.15766712 2.36296974 -0.56868399 831
## 38 -0.7254706 2.89263339 -0.36348376 -0.50612576 566
## 39 -3.3451254 -0.95045293 0.19551398 -0.27716645 826
## 40 -1.0644466 -1.05265304 0.82886286 -0.12042931 1151
## 41 1.4933989 1.86712106 1.81853582 -1.06112429 880
## 42 -0.6789284 1.83156328 -1.65435992 0.95121379 542
## 43 -2.4164258 -0.46701087 1.42808323 0.41149015 823
## 44 2.2978729 0.41865689 -0.64422929 -0.63462770 1030
## 45 -2.9245282 -1.19488555 -3.35139309 -1.48966984 455
## 46 1.7654525 0.95655926 0.98576138 1.05683769 508
## 47 2.3125056 2.56161119 -1.58223354 0.59863946 849
model1 <- lm(V5 ~ ., data = PCAcrime) # Linear regression on our principal components
summary(model1)
##
## Call:
## lm(formula = V5 ~ ., data = PCAcrime)
##
## Residuals:
## Min 1Q Median 3Q Max
## -557.76 -210.91 -29.08 197.26 810.35
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 905.09 49.07 18.443 < 2e-16 ***
## PC1 65.22 20.22 3.225 0.00244 **
## PC2 -70.08 29.63 -2.365 0.02273 *
## PC3 25.19 35.03 0.719 0.47602
## PC4 69.45 46.01 1.509 0.13872
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 336.4 on 42 degrees of freedom
## Multiple R-squared: 0.3091, Adjusted R-squared: 0.2433
## F-statistic: 4.698 on 4 and 42 DF, p-value: 0.003178
Now that we have our model, we need to specify it in terms of our original variables and then compare it to our solution from 8.2. We'll start by obtaining the PC coefficients from the summary of our model and transforming them back into coefficients for our original variables.
https://stackoverflow.com/questions/29783790/how-to-reverse-pca-in-prcomp-to-get-original-data
coeffs <- model1$coefficients[2:5]
coeffs
## PC1 PC2 PC3 PC4
## 65.21593 -70.08312 25.19408 69.44603
inter <- model1$coefficients[1]
eigvectors <- crime.pca$rotation[,1:4] # Multiply our PC coefficients by the matrix of variable loadings (i.e., a matrix whose columns contain the eigenvectors)
trans.coeff <- eigvectors %*% coeffs
trans.coeff # Coefficients back in terms of our original variables
## [,1]
## M -21.277963
## So 10.223091
## Ed 14.352610
## Po1 63.456426
## Po2 64.557974
## LF -14.005349
## M.F -24.437572
## Pop 39.830667
## NW 15.434545
## U1 -27.222281
## U2 1.425902
## Wealth 38.607855
## Ineq -27.536348
## Prob 3.295707
## Time -6.612616
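Note that these coefficients still apply to the centered and scaled predictors that prcomp produced. A minimal sketch (not part of the original output) for unscaling them into the original units: divide each coefficient by its variable's scale factor, and fold the centers into the intercept.
orig.coeff <- trans.coeff / crime.pca$scale # Divide by each variable's scale factor
orig.inter <- inter - sum(trans.coeff * crime.pca$center / crime.pca$scale) # Adjust the intercept for the centering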
Using the same data as in 9.1, we fit a regression tree with the tree function from the tree package.
library(tree)
head(crime)
## M So Ed Po1 Po2 LF M.F Pop NW U1 U2 Wealth Ineq Prob
## 1 15.1 1 9.1 5.8 5.6 0.510 95.0 33 30.1 0.108 4.1 3940 26.1 0.084602
## 2 14.3 0 11.3 10.3 9.5 0.583 101.2 13 10.2 0.096 3.6 5570 19.4 0.029599
## 3 14.2 1 8.9 4.5 4.4 0.533 96.9 18 21.9 0.094 3.3 3180 25.0 0.083401
## 4 13.6 0 12.1 14.9 14.1 0.577 99.4 157 8.0 0.102 3.9 6730 16.7 0.015801
## 5 14.1 0 12.1 10.9 10.1 0.591 98.5 18 3.0 0.091 2.0 5780 17.4 0.041399
## 6 12.1 0 11.0 11.8 11.5 0.547 96.4 25 4.4 0.084 2.9 6890 12.6 0.034201
## Time Crime
## 1 26.2011 791
## 2 25.2999 1635
## 3 24.3006 578
## 4 29.9012 1969
## 5 21.2998 1234
## 6 20.9995 682
treemodel <- tree(Crime~., data = crime)
summary(treemodel)
##
## Regression tree:
## tree(formula = Crime ~ ., data = crime)
## Variables actually used in tree construction:
## [1] "Po1" "Pop" "LF" "NW"
## Number of terminal nodes: 7
## Residual mean deviance: 47390 = 1896000 / 40
## Distribution of residuals:
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -573.900 -98.300 -1.545 0.000 110.600 490.100
plot(treemodel)
text(treemodel)
From plotting the tree with its text labels, we can observe that the function found the best 4 predictors to branch on: Po1, Pop, LF, and NW.
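As a quick follow-up sketch (not part of the original analysis), cv.tree from the same package cross-validates the pruning sequence, which would tell us whether a smaller tree fits about as well as the 7-leaf tree above:
set.seed(1)
cvtree <- cv.tree(treemodel) # 10-fold CV over the cost-complexity pruning sequence
plot(cvtree$size, cvtree$dev, type = "b") # Deviance vs. number of terminal nodes
# prune.tree(treemodel, best = k) would then prune back to k leaves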
Next, we fit a random forest on the same data using the randomForest package. Since our regression tree model found 4 predictors to use, we will set mtry = 4 so the forest also considers 4 variables at each split.
library(randomForest)
## randomForest 4.6-14
## Type rfNews() to see new features/changes/bug fixes.
forestmodel <- randomForest(Crime~., data=crime, mtry = 4)
forestmodel
##
## Call:
## randomForest(formula = Crime ~ ., data = crime, mtry = 4)
## Type of random forest: regression
## Number of trees: 500
## No. of variables tried at each split: 4
##
## Mean of squared residuals: 85324.85
## % Var explained: 41.72
summary(forestmodel)
## Length Class Mode
## call 4 -none- call
## type 1 -none- character
## predicted 47 -none- numeric
## mse 500 -none- numeric
## rsq 500 -none- numeric
## oob.times 47 -none- numeric
## importance 15 -none- numeric
## importanceSD 0 -none- NULL
## localImportance 0 -none- NULL
## proximity 0 -none- NULL
## ntree 1 -none- numeric
## mtry 1 -none- numeric
## forest 11 -none- list
## coefs 0 -none- NULL
## y 47 -none- numeric
## test 0 -none- NULL
## inbag 0 -none- NULL
## terms 3 terms call
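Since randomForest computes node-purity importance by default (the 15-element importance component in the summary above), a short sketch for inspecting which predictors the forest leaned on most:
importance(forestmodel) # IncNodePurity for each of the 15 predictors
varImpPlot(forestmodel) # Dot chart of the same importance measure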
A situation where logistic regression could be useful in business analytics is when responses or outcomes need to be predicted as probabilities. For example, predicting the probability that a customer will stop using a specific company's services based on a set of predictors. Predictors could include things like the length of time a particular individual has been a customer before leaving, or the customer's feedback on the company.
german <- read.table("germancredit.txt")
head(german)
## V1 V2 V3 V4 V5 V6 V7 V8 V9 V10 V11 V12 V13 V14 V15 V16 V17 V18
## 1 A11 6 A34 A43 1169 A65 A75 4 A93 A101 4 A121 67 A143 A152 2 A173 1
## 2 A12 48 A32 A43 5951 A61 A73 2 A92 A101 2 A121 22 A143 A152 1 A173 1
## 3 A14 12 A34 A46 2096 A61 A74 2 A93 A101 3 A121 49 A143 A152 1 A172 2
## 4 A11 42 A32 A42 7882 A61 A74 2 A93 A103 4 A122 45 A143 A153 1 A173 2
## 5 A11 24 A33 A40 4870 A61 A73 3 A93 A101 4 A124 53 A143 A153 2 A173 2
## 6 A14 36 A32 A46 9055 A65 A73 2 A93 A101 4 A124 35 A143 A153 1 A172 2
## V19 V20 V21
## 1 A192 A201 1
## 2 A191 A201 2
## 3 A191 A201 1
## 4 A191 A201 1
## 5 A191 A201 2
## 6 A192 A201 1
The binomial family in glm only recognizes 0s and 1s for the response, so we need to convert our responses to binary values.
german$V21[german$V21==1]<-0
german$V21[german$V21==2]<-1
Next, we'll split the data 70/30 into training and validation sets and create our model using the glm function in stats.
traindata <- sample(1:nrow(german), size = round(nrow(german)*0.7), replace = F)
train <- german[traindata,]
valid <- german[-traindata,]
model3 <- glm(V21 ~ ., family = binomial(link = "logit"), data = train)
summary(model3)
##
## Call:
## glm(formula = V21 ~ ., family = binomial(link = "logit"), data = train)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -2.2561 -0.6816 -0.3395 0.6843 2.7446
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 1.313e+00 1.353e+00 0.970 0.331874
## V1A12 -5.533e-01 2.681e-01 -2.063 0.039065 *
## V1A13 -1.250e+00 4.588e-01 -2.724 0.006443 **
## V1A14 -1.939e+00 2.910e-01 -6.663 2.68e-11 ***
## V2 3.220e-02 1.134e-02 2.838 0.004534 **
## V3A31 -1.778e-01 6.606e-01 -0.269 0.787833
## V3A32 -1.036e+00 5.294e-01 -1.957 0.050334 .
## V3A33 -1.414e+00 5.722e-01 -2.471 0.013472 *
## V3A34 -2.126e+00 5.534e-01 -3.842 0.000122 ***
## V4A41 -1.716e+00 4.663e-01 -3.679 0.000234 ***
## V4A410 -2.691e+00 1.228e+00 -2.191 0.028450 *
## V4A42 -8.837e-01 3.273e-01 -2.700 0.006933 **
## V4A43 -9.902e-01 2.978e-01 -3.325 0.000883 ***
## V4A44 -1.459e+01 5.169e+02 -0.028 0.977486
## V4A45 5.895e-02 7.310e-01 0.081 0.935721
## V4A46 1.514e-01 4.622e-01 0.327 0.743319
## V4A48 -1.021e+00 1.243e+00 -0.822 0.411297
## V4A49 -1.041e+00 4.088e-01 -2.547 0.010860 *
## V5 8.776e-05 5.451e-05 1.610 0.107394
## V6A62 -1.124e-01 3.457e-01 -0.325 0.745083
## V6A63 -8.989e-01 5.808e-01 -1.548 0.121673
## V6A64 -1.629e+00 6.544e-01 -2.489 0.012825 *
## V6A65 -8.437e-01 3.217e-01 -2.622 0.008733 **
## V7A72 1.547e-01 5.084e-01 0.304 0.760958
## V7A73 -2.484e-01 4.913e-01 -0.506 0.613124
## V7A74 -7.885e-01 5.416e-01 -1.456 0.145451
## V7A75 -5.122e-02 4.857e-01 -0.105 0.916011
## V8 1.591e-01 1.075e-01 1.480 0.138851
## V9A92 -2.794e-02 5.027e-01 -0.056 0.955675
## V9A93 -5.497e-01 4.966e-01 -1.107 0.268363
## V9A94 -1.947e-01 5.966e-01 -0.326 0.744158
## V10A102 3.787e-01 5.209e-01 0.727 0.467216
## V10A103 -7.809e-01 5.592e-01 -1.396 0.162605
## V11 4.949e-02 1.072e-01 0.462 0.644195
## V12A122 1.985e-01 3.114e-01 0.638 0.523745
## V12A123 2.414e-01 2.927e-01 0.825 0.409634
## V12A124 7.392e-01 5.473e-01 1.351 0.176835
## V13 -8.123e-03 1.142e-02 -0.711 0.476896
## V14A142 6.104e-02 4.802e-01 0.127 0.898841
## V14A143 -5.777e-01 2.960e-01 -1.952 0.050992 .
## V15A152 -4.780e-01 2.910e-01 -1.643 0.100432
## V15A153 -8.604e-01 5.951e-01 -1.446 0.148202
## V16 3.004e-01 2.275e-01 1.320 0.186682
## V17A172 3.192e-01 9.251e-01 0.345 0.730100
## V17A173 4.684e-01 8.962e-01 0.523 0.601266
## V17A174 5.084e-01 8.905e-01 0.571 0.568071
## V18 -2.373e-02 2.972e-01 -0.080 0.936362
## V19A192 -3.457e-01 2.519e-01 -1.372 0.169968
## V20A202 -1.431e+00 8.283e-01 -1.727 0.084117 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 860.23 on 699 degrees of freedom
## Residual deviance: 604.33 on 651 degrees of freedom
## AIC: 702.33
##
## Number of Fisher Scoring iterations: 14
Using the summary, we can see that many predictors are not significant based on their p-values. We will refit the model keeping only the predictors with at least one significant coefficient.
model4 <- glm(V21 ~ V1 + V2 + V3 + V4 + V5 + V6 + V7 + V8 + V9 + V10 + V12 + V14 + V15 + V16 + V20, family = binomial(link = "logit"), data = train)
summary(model4)
##
## Call:
## glm(formula = V21 ~ V1 + V2 + V3 + V4 + V5 + V6 + V7 + V8 + V9 +
## V10 + V12 + V14 + V15 + V16 + V20, family = binomial(link = "logit"),
## data = train)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -2.2230 -0.6718 -0.3487 0.6691 2.7023
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 1.463e+00 1.010e+00 1.449 0.147432
## V1A12 -5.589e-01 2.651e-01 -2.108 0.034994 *
## V1A13 -1.309e+00 4.521e-01 -2.895 0.003793 **
## V1A14 -1.938e+00 2.893e-01 -6.701 2.07e-11 ***
## V2 3.563e-02 1.106e-02 3.222 0.001274 **
## V3A31 -1.938e-01 6.546e-01 -0.296 0.767129
## V3A32 -1.068e+00 5.260e-01 -2.030 0.042338 *
## V3A33 -1.415e+00 5.698e-01 -2.483 0.013031 *
## V3A34 -2.129e+00 5.501e-01 -3.871 0.000108 ***
## V4A41 -1.723e+00 4.605e-01 -3.741 0.000183 ***
## V4A410 -2.700e+00 1.187e+00 -2.275 0.022916 *
## V4A42 -8.325e-01 3.214e-01 -2.591 0.009581 **
## V4A43 -9.585e-01 2.950e-01 -3.249 0.001158 **
## V4A44 -1.450e+01 5.203e+02 -0.028 0.977772
## V4A45 5.475e-02 7.225e-01 0.076 0.939598
## V4A46 1.066e-01 4.569e-01 0.233 0.815533
## V4A48 -8.769e-01 1.226e+00 -0.715 0.474358
## V4A49 -1.122e+00 4.047e-01 -2.772 0.005572 **
## V5 6.777e-05 5.102e-05 1.328 0.184075
## V6A62 -1.059e-01 3.424e-01 -0.309 0.757161
## V6A63 -9.474e-01 5.722e-01 -1.656 0.097765 .
## V6A64 -1.648e+00 6.520e-01 -2.528 0.011459 *
## V6A65 -8.807e-01 3.179e-01 -2.770 0.005604 **
## V7A72 2.685e-01 4.343e-01 0.618 0.536406
## V7A73 -1.200e-01 4.095e-01 -0.293 0.769420
## V7A74 -6.503e-01 4.705e-01 -1.382 0.166937
## V7A75 3.478e-02 4.176e-01 0.083 0.933624
## V8 1.504e-01 1.039e-01 1.448 0.147574
## V9A92 -3.697e-02 4.905e-01 -0.075 0.939930
## V9A93 -5.790e-01 4.824e-01 -1.200 0.230056
## V9A94 -2.081e-01 5.838e-01 -0.356 0.721511
## V10A102 4.683e-01 5.215e-01 0.898 0.369262
## V10A103 -7.702e-01 5.527e-01 -1.393 0.163477
## V12A122 2.170e-01 3.073e-01 0.706 0.480137
## V12A123 2.768e-01 2.857e-01 0.969 0.332533
## V12A124 6.642e-01 5.384e-01 1.234 0.217372
## V14A142 8.685e-02 4.781e-01 0.182 0.855862
## V14A143 -5.436e-01 2.939e-01 -1.850 0.064348 .
## V15A152 -5.341e-01 2.752e-01 -1.941 0.052286 .
## V15A153 -8.543e-01 5.824e-01 -1.467 0.142368
## V16 2.602e-01 2.203e-01 1.181 0.237630
## V20A202 -1.418e+00 8.251e-01 -1.718 0.085710 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 860.23 on 699 degrees of freedom
## Residual deviance: 607.42 on 658 degrees of freedom
## AIC: 691.42
##
## Number of Fisher Scoring iterations: 14
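To close the loop on the 70/30 split, a minimal sketch (output not shown, and assuming the validation rows contain no factor levels unseen in training) for scoring model4 on the held-out set with a 0.5 probability threshold:
pred <- predict(model4, newdata = valid, type = "response") # Predicted probabilities of V21 = 1
yhat <- as.integer(pred > 0.5) # Classify at a 0.5 threshold
table(actual = valid$V21, predicted = yhat) # Confusion matrix on the validation set
mean(yhat == valid$V21) # Validation accuracy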