This script examines whether the vocabulary and grammar sections of the CDI can be distinguished as separate dimensions. This analysis considers only observations with a unique value of original_id. When we select data for which longitudinal==FALSE, there are still many observations that share a value of original_id. Most of these appear to be participants who were tested at multiple time points as part of a data set that wasn’t exclusively longitudinal. However, the Wordbank help file indicates that this variable is not always reliable. We have therefore decided to run our analyses twice: with and without multiple instances of original_id.
Load the libraries.
options(max.print=500)
library(wordbankr) # WB data
library(tidyverse) # tidy
library(mirt) # IRT models
library(psych) # some psychometric stuff (tests of dimensionality)
library(Gifi) # some more psychometric stuff (tests of dimensionality)
library(knitr) # some formatting, tables, etc
library(patchwork) # combining plots.
library(sirt) # additional IRT functions
Download the data sets.
Inst <- get_instrument_data(language="English (American)", form="WS")
Admin <- get_administration_data(language="English (American)", form="WS", original_ids=TRUE) # original IDs for getting additional instances
N_total = nrow(Admin) # making sure things add up later
N_long = nrow(filter(Admin, longitudinal==TRUE)) # making sure things add up later
Item <- get_item_data(language="English (American)", form = "WS")
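As a quick sanity check on the duplicate-original_id issue described above, the following minimal sketch (not part of the main pipeline) counts how many original_id values have more than one administration despite longitudinal==FALSE:
# Rough check: number of original_id values with multiple non-longitudinal administrations.
Admin %>%
  filter(longitudinal == FALSE) %>%
  count(original_id, name = "n_admins") %>%
  filter(n_admins > 1) %>%
  nrow()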
Select one observation (the oldest) for each original_id.
Quick_Test_Admin <- Admin %>%
filter(longitudinal==FALSE) %>%
group_by(original_id) %>%
arrange(age) %>%
mutate(
D = 1,
trial_num = row_number(),
total_N = sum(D), # Total number of trials per participant
max_trial = ifelse(total_N == trial_num, yes=1, no=0)
) %>%
ungroup()
ggplot(Quick_Test_Admin, aes(x=total_N)) + geom_histogram()
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
Quick_Test_Admin %>%
group_by(total_N) %>%
count()
Make sure I’ve selected the latest administration for each original_id.
Quick_Test_Admin %>%
filter(max_trial ==1) %>%
summarise(
Mean= mean(age),
Min= min(age),
max = max(age)
)
Quick_Test_Admin %>%
filter(max_trial ==1) %>%
filter( age < 19) %>%
count()
Quick_Test_Admin %>%
filter(trial_num ==1) %>%
summarise(
Mean= mean(age),
Min= min(age),
max = max(age)
)
Quick_Test_Admin %>%
filter(trial_num==1) %>%
filter( age < 19) %>%
count()
OK, that did what I wanted, so now we’ll make a new data set that contains only the last administration for each value of original_id.
Admin2 <- Admin %>%
filter(longitudinal==FALSE) %>%
group_by(original_id) %>%
arrange(age) %>%
mutate(
D = 1,
trial_num = row_number(),
total_N = sum(D),
max_trial = ifelse(total_N == trial_num, yes=1, no=0)
) %>%
ungroup(original_id) %>%
filter(max_trial==1)
N_dropped <- Admin %>%
filter(longitudinal==FALSE) %>%
group_by(original_id) %>%
arrange(age) %>%
mutate(
D = 1,
trial_num = row_number(),
total_N = sum(D),
max_trial = ifelse(total_N == trial_num, yes=1, no=0)
) %>%
ungroup(original_id) %>%
filter(max_trial==0) %>%
nrow()
Complex <- Admin2 %>% # start with new admin data set
left_join(.,Inst, by="data_id") %>%
left_join(., Item, by="num_item_id") %>%
filter(type == "combine" | # to drop non-combiners
type == "complexity" # to calculate complexity scores.
) %>%
mutate(
out = ifelse(value=="complex" | value=="sometimes" | value=="produces", yes=1,
no = ifelse(value=="often", yes=2, no =0))
)
N_complexity_items = nrow(filter(Item, type == "combine" |
type == "complexity"))
nrow(Complex) == (N_total - N_long - N_dropped)*N_complexity_items
## [1] TRUE
Dropped_long <- N_long + N_dropped # dropped because of both definitions of longitudinal
Complex$complexity_category <- ifelse(Complex$complexity_category == "", yes=Complex$type, no=Complex$complexity_category)
Complex_short_with_ids_all <- Complex %>%
dplyr::select(data_id, value, out, complexity_category, num_item_id) %>%
mutate(
label = str_c(complexity_category, num_item_id)
) %>%
pivot_wider(id_cols=data_id, names_from = "label", values_from="out") %>%
dplyr::select(starts_with(c("data_id", "combine", "morphology", "syntax")))
Complex_short_with_ids <- Complex_short_with_ids_all %>%
drop_na()
N_NA <- nrow(Complex_short_with_ids_all) - nrow(Complex_short_with_ids)
nrow(Complex_short_with_ids) == N_total - Dropped_long - N_NA
## [1] TRUE
Complex_short_grammatical <- Complex_short_with_ids %>%
filter(combine760 > 0) %>%
dplyr::select(starts_with(c("morphology", "syntax"))) # need to get rid of participant names for IRT models.
N_nog <- nrow(filter(Complex_short_with_ids, combine760 == 0))
nrow(Complex_short_grammatical) == N_total - Dropped_long - N_NA - N_nog
## [1] TRUE
## Make vocab data set.
Vocab <- Admin2 %>% # Start with new administration dataset.
left_join(.,Inst, by="data_id") %>%
left_join(., Item, by="num_item_id") %>%
filter(type == "word"
) %>%
mutate(
out = ifelse(value=="produces", yes=1, no =0)
)
N_vocab = nrow(filter(Item, type == "word"))
nrow(Vocab) == (N_total - Dropped_long)*N_vocab
## [1] TRUE
Vocab_short_with_ids_all <- Vocab %>%
filter(lexical_category == "nouns" | lexical_category == "predicates") %>%
dplyr::select(data_id, value, out, definition) %>%
pivot_wider(id_cols=data_id, names_from = "definition", values_from="out")
Vocab_short_with_ids <- Vocab_short_with_ids_all %>%
drop_na()
N_NA_vocab <- nrow(Vocab_short_with_ids_all) - nrow(Vocab_short_with_ids)
Vocab_short <- Vocab_short_with_ids %>%
dplyr::select(-"data_id") # dataset for IRT can't have IDs
nrow(Vocab_short_with_ids) == N_total - Dropped_long - N_NA_vocab #Not yet updated
## [1] TRUE
#Combine the two datasets
full <- full_join(
Complex_short_with_ids, Vocab_short_with_ids, by="data_id"
) %>%
filter(combine760 > 0) %>%
dplyr::select(-c("data_id", "combine760")) %>%
drop_na()
nrow(full)/2187 # size of new dataset relative to the one in the prior analysis
## [1] 0.8573388
full_tetra <- tetrachoric(full)
## For i = 89 j = 53 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 132 j = 95 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 144 j = 95 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 147 j = 95 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 153 j = 95 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 168 j = 95 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 177 j = 95 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 177 j = 96 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 177 j = 113 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 177 j = 124 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 177 j = 131 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 197 j = 95 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 197 j = 146 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 203 j = 113 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 203 j = 146 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 203 j = 150 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 205 j = 95 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 206 j = 124 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 208 j = 95 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 219 j = 177 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 227 j = 203 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 230 j = 95 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 235 j = 95 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 262 j = 95 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 291 j = 226 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 291 j = 289 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 321 j = 95 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 424 j = 95 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 485 j = 482 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## For i = 496 j = 482 A cell entry of 0 was replaced with correct = 0.5. Check your data!
## Warning in cor.smooth(mat): Matrix was not positive definite, smoothing was done
rho <- full_tetra$rho
fa.parallel(rho, fa="fa", fm="minres", cor="poly", n.obs = 1875)
## In smc, smcs < 0 were set to .0
## In smc, smcs < 0 were set to .0
## In smc, smcs < 0 were set to .0
## The determinant of the smoothed correlation was zero.
## This means the objective function is not defined.
## Chi square is based upon observed residuals.
## The determinant of the smoothed correlation was zero.
## This means the objective function is not defined for the null model either.
## The Chi square is thus based upon observed correlations.
## Warning in fa.stats(r = r, f = f, phi = phi, n.obs = n.obs, np.obs = np.obs, :
## The estimated weights for the factor scores are probably incorrect. Try a
## different factor score estimation method.
## Parallel analysis suggests that the number of factors = 20 and the number of components = NA
vss(rho, fa="fa", fm="minres", cor="poly", n.obs = 1875)
## In smc, smcs < 0 were set to .0
## In smc, smcs < 0 were set to .0
## In smc, smcs < 0 were set to .0
## The determinant of the smoothed correlation was zero.
## This means the objective function is not defined.
## Chi square is based upon observed residuals.
## The determinant of the smoothed correlation was zero.
## This means the objective function is not defined for the null model either.
## The Chi square is thus based upon observed correlations.
## Warning in fa.stats(r = r, f = f, phi = phi, n.obs = n.obs, np.obs = np.obs, :
## The estimated weights for the factor scores are probably incorrect. Try a
## different factor score estimation method.
##
## Very Simple Structure
## Call: vss(x = rho, fm = "minres", n.obs = 1875, cor = "poly", fa = "fa")
## VSS complexity 1 achieves a maximimum of 0.99 with 1 factors
## VSS complexity 2 achieves a maximimum of 1 with 2 factors
##
## The Velicer MAP achieves a minimum of 0 with 8 factors
## BIC achieves a minimum of -729454 with 8 factors
## Sample Size adjusted BIC achieves a minimum of -321964 with 8 factors
##
## Statistics by number of factors
## vss1 vss2 map dof chisq prob sqresid fit RMSEA BIC SABIC
## 1 0.99 0.00 0.0104 131840 873693 0 514 0.99 0.055 -119901 298953
## 2 0.47 1.00 0.0067 131326 535412 0 315 1.00 0.041 -454308 -37087
## 3 0.35 0.83 0.0057 130813 427214 0 251 1.00 0.035 -558640 -143049
## 4 0.29 0.72 0.0049 130301 355614 0 209 1.00 0.030 -626382 -212417
## 5 0.27 0.71 0.0042 129790 299847 0 177 1.00 0.026 -678297 -265956
## 6 0.29 0.73 0.0039 129280 272858 0 161 1.00 0.024 -701443 -290722
## 7 0.29 0.74 0.0038 128771 254953 0 150 1.00 0.023 -715512 -306408
## 8 0.29 0.74 0.0037 128263 237183 0 140 1.00 0.021 -729454 -321964
## complex eChisq SRMR eCRMS eBIC
## 1 1.0 808116 0.040 0.040 -185478
## 2 1.8 455701 0.030 0.030 -534019
## 3 2.4 347573 0.026 0.027 -638281
## 4 2.8 276890 0.024 0.024 -705106
## 5 3.1 222613 0.021 0.021 -755532
## 6 3.1 197768 0.020 0.020 -776533
## 7 3.2 182032 0.019 0.019 -788433
## 8 3.3 166564 0.018 0.019 -800073
# Fit IRT model: try a unidimensional 2PL in mirt.
#m1 <- mirt(full, 1, "2PL")
#saveRDS(m1, "combined_irt_output/re_test/m1.rds")
m1 <- readRDS("combined_irt_output/re_test/m1.rds")
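Since the fitting call is commented out and the fitted object is loaded from disk, standard mirt accessors such as the following can be used to sanity-check the loaded model (a quick sketch, not part of the original pipeline):
# Inspect the loaded unidimensional 2PL model.
summary(m1)                                            # standardized loadings and communalities
head(coef(m1, simplify = TRUE, IRTpars = TRUE)$items)  # item discrimination/difficulty parameters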
Get the factor scores.
fscores <- fscores(m1, use_dentype_estimate=TRUE)[,1]
Run the DETECT analyses. First, convert the data to a plain data frame for the sirt DETECT functions.
full2 <- data.frame(full)
## Confirmatory DETECT: Grammar vs Vocab
dtct <- c(rep(1, 37), rep(2, 478)) # 37 grammar items (12 morphology + 25 syntax) followed by 478 vocabulary items
conf <- conf.detect(full2, fscores, dtct)
## -----------------------------------------------------------
## Confirmatory DETECT Analysis
## Conditioning on 1 Score
## Bandwidth Scale: 1.1
## Pairwise Estimation of Conditional Covariances
## ...........................................................
## Nonparametric ICC estimation
## 5% 10% 15% 20% 25% 30% 35% 40% 45% 50%
## 55% 60% 65% 70% 75% 80% 85% 90% 95%
## ...........................................................
## Nonparametric Estimation of conditional covariances
## 5% 10% 15% 20% 25% 30% 35% 40% 45% 50%
## 55% 60% 65% 70% 75% 80% 85% 90% 95%
## -----------------------------------------------------------
## unweighted weighted
## DETECT 0.103 0.103
## ASSI 0.072 0.072
## RATIO 0.245 0.245
## MADCOV100 0.420 0.420
## MCOV100 0.069 0.069
DETECT is less than .2, which is conventionally taken to indicate essentially unidimensional data, so there is no evidence for a dimensional structure separating vocabulary and grammar.
Next, run an exploratory DETECT to look for any other form of multidimensionality.
d1 <- expl.detect(full2, fscores, nclusters=2)
## Pairwise Estimation of Conditional Covariances
## ...........................................................
## Nonparametric ICC estimation
## 5% 10% 15% 20% 25% 30% 35% 40% 45% 50%
## 55% 60% 65% 70% 75% 80% 85% 90% 95%
## ...........................................................
## Nonparametric Estimation of conditional covariances
## 5% 10% 15% 20% 25% 30% 35% 40% 45% 50%
## 55% 60% 65% 70% 75% 80% 85% 90% 95%
## Pairwise Estimation of Conditional Covariances
## ...........................................................
## Nonparametric ICC estimation
## 5% 10% 15% 20% 25% 30% 35% 40% 45% 50%
## 55% 60% 65% 70% 75% 80% 85% 90% 95%
## ...........................................................
## Nonparametric Estimation of conditional covariances
## 5% 10% 15% 20% 25% 30% 35% 40% 45% 50%
## 55% 60% 65% 70% 75% 80% 85% 90% 95%
##
##
## DETECT (unweighted)
##
## Optimal Cluster Size is 2 (Maximum of DETECT Index)
##
## N.Cluster N.items N.est N.val size.cluster DETECT.est ASSI.est RATIO.est
## 1 2 515 937 938 189-326 0.293 0.408 0.595
## MADCOV100.est MCOV100.est DETECT.val ASSI.val RATIO.val MADCOV100.val
## 1 0.492 0.091 0.222 0.299 0.482 0.461
## MCOV100.val
## 1 0.094
Some evidence of multidimensionality. Let’s look at which items belong to each dimension.
options(max.print=2000)
d1
## $detect.unweighted
## DETECT.val ASSI.val RATIO.val MADCOV100.val MCOV100.val
## Cl2 0.222258 0.2986891 0.4822945 0.4608347 0.09425026
##
## $detect.weighted
## DETECT.val ASSI.val RATIO.val MADCOV100.val MCOV100.val
## Cl2 0.222258 0.2986891 0.4822945 0.4608347 0.09425026
##
## $clusterfit
##
## Call:
## stats::hclust(d = d, method = "ward.D")
##
## Cluster method : ward.D
## Number of objects: 515
##
##
## $itemcluster
## item cluster2
## 1 morphology761 1
## 2 morphology762 1
## 3 morphology763 1
## 4 morphology764 1
## 5 morphology765 1
## 6 morphology766 1
## 7 morphology767 1
## 8 morphology768 1
## 9 morphology769 1
## 10 morphology770 1
## 11 morphology771 1
## 12 morphology772 1
## 13 syntax773 1
## 14 syntax774 1
## 15 syntax775 1
## 16 syntax776 1
## 17 syntax777 1
## 18 syntax778 1
## 19 syntax779 1
## 20 syntax780 1
## 21 syntax781 1
## 22 syntax782 1
## 23 syntax783 1
## 24 syntax784 1
## 25 syntax785 1
## 26 syntax786 1
## 27 syntax787 1
## 28 syntax788 1
## 29 syntax789 1
## 30 syntax790 1
## 31 syntax791 1
## 32 syntax792 1
## 33 syntax793 1
## 34 syntax794 1
## 35 syntax795 1
## 36 syntax796 1
## 37 syntax797 1
## 38 alligator 2
## 39 animal 2
## 40 ant 2
## 41 bear 2
## 42 bee 2
## 43 bird 2
## 44 bug 2
## 45 bunny 2
## 46 butterfly 2
## 47 cat 2
## 48 chicken..animal. 2
## 49 cow 2
## 50 deer 2
## 51 dog 2
## 52 donkey 2
## 53 duck 2
## 54 elephant 2
## 55 fish..animal. 2
## 56 frog 2
## 57 giraffe 2
## 58 goose 2
## 59 hen 2
## 60 horse 2
## 61 kitty 2
## 62 lamb 2
## 63 lion 2
## 64 monkey 2
## 65 moose 2
## 66 mouse 2
## 67 owl 2
## 68 penguin 2
## 69 pig 2
## 70 pony 2
## 71 puppy 2
## 72 rooster 2
## 73 sheep 2
## 74 squirrel 2
## 75 teddybear 2
## 76 tiger 2
## 77 turkey 2
## 78 turtle 2
## 79 wolf 2
## 80 zebra 2
## 81 airplane 2
## 82 bicycle 2
## 83 boat 2
## 84 bus 2
## 85 car 2
## 86 firetruck 2
## 87 helicopter 2
## 88 motorcycle 2
## 89 sled 2
## 90 stroller 2
## 91 tractor 2
## 92 train 2
## 93 tricycle 2
## 94 truck 2
## 95 ball 2
## 96 balloon 2
## 97 bat 2
## 98 block 2
## 99 book 2
## 100 bubbles 2
## 101 chalk 2
## 102 crayon 2
## 103 doll 2
## 104 game 2
## 105 glue 2
## 106 pen 2
## 107 pencil 2
## 108 play.dough 2
## 109 present 2
## 110 puzzle 2
## 111 story 2
## 112 toy..object. 2
## 113 apple 2
## 114 applesauce 2
## 115 banana 2
## 116 beans 2
## 117 bread 2
## 118 butter 2
## 119 cake 2
## 120 candy 2
## 121 carrots 2
## 122 cereal 2
## 123 cheerios 2
## 124 cheese 2
## 125 chicken..food. 2
## 126 chocolate 2
## 127 coffee 2
## 128 coke 2
## 129 cookie 2
## 130 corn 2
## 131 cracker 2
## 132 donut 2
## 133 drink..beverage. 1
## 134 egg 2
## 135 fish..food. 2
## 136 food 2
## 137 french.fries 2
## 138 grapes 2
## 139 green.beans 2
## 140 gum 2
## 141 hamburger 2
## 142 ice 2
## 143 ice.cream 2
## 144 jello 2
## 145 jelly 2
## 146 juice 2
## 147 lollipop 2
## 148 meat 2
## 149 melon 2
## 150 milk 2
## 151 muffin 2
## 152 noodles 2
## 153 nuts 2
## 154 orange..food. 2
## 155 pancake 2
## 156 peas 2
## 157 peanut.butter 2
## 158 pickle 2
## 159 pizza 2
## 160 popcorn 2
## 161 popsicle 2
## 162 potato.chip 2
## 163 potato 2
## 164 pretzel 2
## 165 pudding 2
## 166 pumpkin 2
## 167 raisin 2
## 168 salt 2
## 169 sandwich 2
## 170 sauce 2
## 171 soda.pop 2
## 172 soup 2
## 173 spaghetti 2
## 174 strawberry 2
## 175 toast 2
## 176 tuna 2
## 177 vanilla 2
## 178 vitamins 2
## 179 water..beverage. 2
## 180 yogurt 2
## 181 beads 2
## 182 belt 2
## 183 bib 2
## 184 boots 2
## 185 button 2
## 186 coat 2
## 187 diaper 2
## 188 dress..object. 2
## 189 gloves 2
## 190 hat 2
## 191 jacket 2
## 192 jeans 2
## 193 mittens 2
## 194 necklace 2
## 195 pajamas 2
## 196 pants 2
## 197 scarf 2
## 198 shirt 2
## 199 shoe 2
## 200 shorts 2
## 201 slipper 2
## 202 sneaker 2
## 203 snowsuit 2
## 204 sock 2
## 205 sweater 2
## 206 tights 2
## 207 underpants 2
## 208 zipper 2
## 209 ankle 2
## 210 arm 2
## 211 belly.button 2
## 212 buttocks.bottom. 2
## 213 cheek 2
## 214 chin 2
## 215 ear 2
## 216 eye 2
## 217 face 2
## 218 finger 2
## 219 foot 2
## 220 hair 2
## 221 hand 2
## 222 head 2
## 223 knee 2
## 224 leg 2
## 225 lips 2
## 226 mouth 2
## 227 nose 2
## 228 owie.boo.boo 2
## 229 penis. 2
## 230 shoulder 2
## 231 toe 2
## 232 tongue 2
## 233 tooth 2
## 234 tummy 2
## 235 vagina. 2
## 236 basket 2
## 237 blanket 2
## 238 bottle 2
## 239 bowl 2
## 240 box 2
## 241 broom 2
## 242 brush 2
## 243 bucket 2
## 244 camera 2
## 245 can..object. 2
## 246 clock 2
## 247 comb 2
## 248 cup 2
## 249 dish 2
## 250 fork 2
## 251 garbage 2
## 252 glass 2
## 253 glasses 2
## 254 hammer 2
## 255 jar 2
## 256 keys 2
## 257 knife 2
## 258 lamp 2
## 259 light 2
## 260 medicine 2
## 261 money 2
## 262 mop 2
## 263 nail 2
## 264 napkin 2
## 265 paper 2
## 266 penny 2
## 267 picture 2
## 268 pillow 2
## 269 plant 2
## 270 plate 2
## 271 purse 2
## 272 radio 2
## 273 scissors 2
## 274 soap 2
## 275 spoon 2
## 276 tape 2
## 277 telephone 2
## 278 tissue.kleenex 2
## 279 toothbrush 2
## 280 towel 2
## 281 trash 2
## 282 tray 2
## 283 vacuum 2
## 284 walker 2
## 285 watch..object. 2
## 286 basement 2
## 287 bathroom 2
## 288 bathtub 2
## 289 bed 2
## 290 bedroom 2
## 291 bench 2
## 292 chair 2
## 293 closet 2
## 294 couch 2
## 295 crib 2
## 296 door 2
## 297 drawer 2
## 298 dryer 2
## 299 garage 2
## 300 high.chair 2
## 301 kitchen 2
## 302 living.room 2
## 303 oven 2
## 304 play.pen 2
## 305 porch 2
## 306 potty 2
## 307 rocking.chair 2
## 308 refrigerator 2
## 309 room 2
## 310 shower 2
## 311 sink 2
## 312 sofa 2
## 313 stairs 2
## 314 stove 2
## 315 table 2
## 316 TV 2
## 317 window 2
## 318 washing.machine 2
## 319 backyard 2
## 320 cloud 2
## 321 flag 2
## 322 flower 2
## 323 garden 2
## 324 grass 2
## 325 hose 2
## 326 ladder 2
## 327 lawn.mower 2
## 328 moon 2
## 329 pool 2
## 330 rain 2
## 331 rock 2
## 332 roof 2
## 333 sandbox 2
## 334 shovel 2
## 335 sidewalk 2
## 336 sky 2
## 337 slide..object. 2
## 338 snow 2
## 339 snowman 2
## 340 sprinkler 2
## 341 star 2
## 342 stick 2
## 343 stone 2
## 344 street 2
## 345 sun 2
## 346 swing..object. 2
## 347 tree 2
## 348 water..not.beverage. 2
## 349 wind 1
## 350 bite 1
## 351 blow 1
## 352 break. 1
## 353 bring 1
## 354 build 1
## 355 bump 1
## 356 buy 1
## 357 carry 1
## 358 catch 1
## 359 chase 1
## 360 clap 1
## 361 clean..action. 1
## 362 climb 1
## 363 close 1
## 364 cook 1
## 365 cover 1
## 366 cry 1
## 367 cut 1
## 368 dance 1
## 369 draw 1
## 370 drink..action. 1
## 371 drive 1
## 372 drop 1
## 373 dry..action. 1
## 374 dump 1
## 375 eat 1
## 376 fall 1
## 377 feed 1
## 378 find 1
## 379 finish 1
## 380 fit 1
## 381 fix 1
## 382 get 1
## 383 give 1
## 384 go 1
## 385 hate 1
## 386 have 1
## 387 hear 1
## 388 help 1
## 389 hide 1
## 390 hit 1
## 391 hold 1
## 392 hug 1
## 393 hurry 1
## 394 jump 1
## 395 kick 1
## 396 kiss 1
## 397 knock 1
## 398 lick 1
## 399 like 1
## 400 listen 1
## 401 look 1
## 402 love 1
## 403 make 1
## 404 open 1
## 405 paint 1
## 406 pick 1
## 407 play 1
## 408 pour 1
## 409 pretend 1
## 410 pull 1
## 411 push 1
## 412 put 1
## 413 read 1
## 414 ride 1
## 415 rip 1
## 416 run 1
## 417 say 1
## 418 see 1
## 419 shake 1
## 420 share 1
## 421 show 1
## 422 sing 1
## 423 sit 1
## 424 skate 1
## 425 sleep 1
## 426 slide..action. 2
## 427 smile 1
## 428 spill 1
## 429 splash 1
## 430 stand 1
## 431 stay 1
## 432 stop 1
## 433 sweep 2
## 434 swim 2
## 435 swing..action. 2
## 436 take 1
## 437 talk 1
## 438 taste 1
## 439 tear 1
## 440 think 1
## 441 throw 1
## 442 tickle 1
## 443 touch 1
## 444 wait 1
## 445 wake 1
## 446 walk 1
## 447 wash 1
## 448 watch..action. 1
## 449 wipe 1
## 450 wish 1
## 451 work..action. 1
## 452 write 1
## 453 all.gone 2
## 454 asleep 1
## 455 awake 1
## 456 bad 1
## 457 better 1
## 458 big 1
## 459 black 2
## 460 blue 2
## 461 broken 1
## 462 brown 2
## 463 careful 1
## 464 clean..description. 1
## 465 cold 1
## 466 cute 1
## 467 dark 1
## 468 dirty 2
## 469 dry..description. 1
## 470 empty 1
## 471 fast 1
## 472 fine 1
## 473 first 1
## 474 full 1
## 475 gentle 1
## 476 good 1
## 477 green 2
## 478 happy 1
## 479 hard 1
## 480 heavy 1
## 481 high 1
## 482 hot 1
## 483 hungry 1
## 484 hurt 1
## 485 last 1
## 486 little..description. 1
## 487 long 1
## 488 loud 1
## 489 mad 1
## 490 naughty 2
## 491 new 1
## 492 nice 1
## 493 noisy 1
## 494 old 1
## 495 orange..description. 2
## 496 poor 1
## 497 pretty 1
## 498 quiet 1
## 499 red 2
## 500 sad 1
## 501 scared 1
## 502 sick 1
## 503 sleepy 1
## 504 slow 1
## 505 soft 1
## 506 sticky 1
## 507 stuck 1
## 508 thirsty 1
## 509 tiny 1
## 510 tired 1
## 511 wet 1
## 512 white 2
## 513 windy 1
## 514 yellow 2
## 515 yucky 2
##
## attr(,"class")
## [1] "expl.detect"
It looks like dimension 1 contains mostly grammar items and predicates, and dimension 2 contains mostly nouns.
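To check that reading more systematically, the cluster assignments can be cross-tabulated against a rough item-type label. A minimal sketch, assuming the $itemcluster data frame printed above (grammar items were given the "morphology"/"syntax" prefixes when the wide data set was built; splitting vocabulary into nouns vs. predicates would additionally require matching the munged item names back to Item$lexical_category):
# Cross-tabulate DETECT cluster membership against a rough item-type label.
d1$itemcluster %>%
  mutate(item_type = if_else(grepl("^(morphology|syntax)", item), "grammar", "vocabulary")) %>%
  count(cluster2, item_type)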