pacman::p_load(tidyverse, arsenal, janitor, knitr)
This is the processed dataset.
load("C:/Users/luisf/Dropbox/Puc-Rio/Projeto - Adicao a internet, imagem e alimentacao/Pesquisa/analise de dados/2020 - Grupo estudos - puc/R base - Dados after processing on December 2020 (PUCRio).RData")
dados %>% names
[1] "id_unique" "id" "data" "pais" "periodo_fac" "idade" "sexo"
[8] "altura" "peso_atual" "peso_desejado" "faz_esporte" "familia_esporte" "cr" "ceri1"
[15] "ceri2" "ceri3" "ceri4" "ceri5" "ceri6" "eat1" "eat2"
[22] "eat3" "eat4" "eat5" "eat6" "eat7" "eat8" "eat9"
[29] "eat10" "eat11" "eat12" "eat13" "eat14" "eat15" "eat16"
[36] "eat17" "eat18" "eat19" "eat20" "eat21" "eat22" "eat23"
[43] "eat24" "eat25" "eat26" "bsq1" "bsq2" "bsq3" "bsq4"
[50] "bsq5" "bsq6" "bsq7" "bsq8" "bsq9" "bsq10" "bsq11"
[57] "bsq12" "bsq13" "bsq14" "bsq15" "bsq16" "bsq17" "bsq18"
[64] "bsq19" "bsq20" "bsq21" "bsq22" "bsq23" "bsq24" "bsq25"
[71] "bsq26" "bsq27" "bsq28" "bsq29" "bsq30" "bsq31" "bsq32"
[78] "bsq33" "bsq34" "imc" "sex_female" "eat_soma" "bsq_soma" "eat1_c"
[85] "eat2_c" "eat3_c" "eat4_c" "eat5_c" "eat6_c" "eat7_c" "eat8_c"
[92] "eat9_c" "eat10_c" "eat11_c" "eat12_c" "eat13_c" "eat14_c" "eat15_c"
[99] "eat16_c" "eat17_c" "eat18_c" "eat19_c" "eat20_c" "eat21_c" "eat22_c"
[106] "eat23_c" "eat24_c" "eat25_c" "eat26_c" "bsq1_c" "bsq2_c" "bsq3_c"
[113] "bsq4_c" "bsq5_c" "bsq6_c" "bsq7_c" "bsq8_c" "bsq9_c" "bsq10_c"
[120] "bsq11_c" "bsq12_c" "bsq13_c" "bsq14_c" "bsq15_c" "bsq16_c" "bsq17_c"
[127] "bsq18_c" "bsq19_c" "bsq20_c" "bsq21_c" "bsq22_c" "bsq23_c" "bsq24_c"
[134] "bsq25_c" "bsq26_c" "bsq27_c" "bsq28_c" "bsq29_c" "bsq30_c" "bsq31_c"
[141] "bsq32_c" "bsq33_c" "bsq34_c" "eat_soma_c" "bsq_soma_c" "delta_peso" "delta_peso_percent"
[148] "country" "stat" "weight_status"
dados %>%
select(starts_with("eat") & ends_with("_c"), -eat_soma_c) %>%
psych::alpha()
Number of categories should be increased in order to count frequencies.
Some items ( eat13_c ) were negatively correlated with the total scale and
probably should be reversed.
To do this, run the function again with the 'check.keys=TRUE' option
Reliability analysis
Call: psych::alpha(x = .)
lower alpha upper 95% confidence boundaries
0.84 0.86 0.87
(Reliability-if-item-dropped and item statistics tables omitted.)
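As the warning above suggests, the flagged item (eat13_c) can be reverse-keyed automatically by re-running the same call with check.keys = TRUE (a follow-up sketch, not run in the original log):
dados %>%
  select(starts_with("eat") & ends_with("_c"), -eat_soma_c) %>%
  psych::alpha(check.keys = TRUE)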
# EAT-26 subscale scores: Dieting, Bulimia and Food Preoccupation, Oral Control
dados <- dados %>%
  mutate(eat_dieta   = rowSums(select(., eat1_c, eat6_c, eat7_c, eat10_c, eat11_c, eat12_c, eat14_c, eat16_c, eat17_c, eat22_c, eat23_c, eat24_c, eat25_c))) %>%
  mutate(eat_bulimia = rowSums(select(., eat3_c, eat4_c, eat9_c, eat18_c, eat21_c, eat26_c))) %>%
  mutate(eat_oral    = rowSums(select(., eat2_c, eat5_c, eat8_c, eat13_c, eat15_c, eat19_c, eat20_c)))
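A quick sanity check (not part of the original script): the three subscales above should partition the 26 EAT items, with no item used twice.
itens <- list(
  dieta   = c(1, 6, 7, 10, 11, 12, 14, 16, 17, 22, 23, 24, 25),
  bulimia = c(3, 4, 9, 18, 21, 26),
  oral    = c(2, 5, 8, 13, 15, 19, 20)
)
length(unlist(itens)) == 26 && anyDuplicated(unlist(itens)) == 0  # should be TRUE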
dados %>%
select(eat_dieta, eat_bulimia, eat_oral, eat_soma_c) %>%
cor(.)
eat_dieta eat_bulimia eat_oral eat_soma_c
eat_dieta 1.0000000 0.7017194 0.3005106 0.9377941
eat_bulimia 0.7017194 1.0000000 0.2369181 0.7877045
eat_oral 0.3005106 0.2369181 1.0000000 0.5702859
eat_soma_c 0.9377941 0.7877045 0.5702859 1.0000000
dados %>%
  select(eat1_c, eat6_c, eat7_c, eat10_c, eat11_c, eat12_c, eat14_c, eat16_c, eat17_c, eat22_c, eat23_c, eat24_c, eat25_c) %>%
  psych::alpha()
Number of categories should be increased in order to count frequencies.
Reliability analysis
Call: psych::alpha(x = .)
lower alpha upper 95% confidence boundaries
0.84 0.86 0.87
(Reliability-if-item-dropped and item statistics tables omitted.)
dados %>% select(eat3_c, eat4_c,eat9_c,eat18_c,eat21_c,eat26_c) %>%
psych::alpha(.)
Reliability analysis
Call: psych::alpha(x = .)
lower alpha upper 95% confidence boundaries
0.56 0.6 0.64
(Reliability-if-item-dropped and item statistics tables omitted.)
Non missing response frequency for each item
0 0.00676818950930626 0.0303030303030303 0.217687074829932 0.315789473684211 0.406408094435076 0.804920913884007 1 2 3 miss
eat3_c 0.52 0.00 0 0.00 0.00 0 0.04 0.19 0.16 0.09 0
eat4_c 0.81 0.00 0 0.00 0.01 0 0.00 0.08 0.07 0.03 0
eat9_c 0.99 0.01 0 0.00 0.00 0 0.00 0.01 0.00 0.00 0
eat18_c 0.86 0.00 0 0.01 0.00 0 0.00 0.06 0.04 0.02 0
eat21_c 0.76 0.00 0 0.00 0.00 0 0.00 0.11 0.08 0.05 0
eat26_c 0.98 0.00 0 0.00 0.00 0 0.00 0.01 0.00 0.01 0
dados %>% select(eat2_c, eat5_c,eat8_c,eat13_c,eat15_c,eat19_c,eat20_c) %>%
psych::alpha()
Reliability analysis
Call: psych::alpha(x = .)
lower alpha upper 95% confidence boundaries
0.5 0.56 0.61
(Reliability-if-item-dropped and item statistics tables omitted.)
Non missing response frequency for each item
0 0.194207836456559 0.294915254237288 0.306913996627319 0.597643097643098 0.648148148148148 0.738175675675676 1 2 3 miss
eat2_c 0.85 0.01 0.00 0 0 0 0.00 0.09 0.02 0.02 0
eat5_c 0.67 0.00 0.00 0 0 0 0.00 0.14 0.12 0.07 0
eat8_c 0.82 0.00 0.00 0 0 0 0.00 0.09 0.04 0.05 0
eat13_c 0.82 0.00 0.01 0 0 0 0.00 0.09 0.05 0.04 0
eat15_c 0.66 0.00 0.00 0 0 0 0.00 0.13 0.11 0.10 0
eat19_c 0.57 0.00 0.00 0 0 0 0.01 0.19 0.16 0.08 0
eat20_c 0.87 0.00 0.00 0 0 0 0.00 0.08 0.03 0.02 0
dados %>%
select(starts_with("bsq") & ends_with("_c"), -bsq_soma_c) %>%
psych::alpha()
Number of categories should be increased in order to count frequencies.
Reliability analysis
Call: psych::alpha(x = .)
lower alpha upper 95% confidence boundaries
0.97 0.97 0.97
(Reliability-if-item-dropped and item statistics tables omitted.)
dados %>%
mutate(corte =
if_else(bsq_soma_c >= 110, 1,0)) %>%
{descr::CrossTable(.$country,.$corte,
chisq = T,prop.chisq = F,
expected = T)}
Cell Contents
|-------------------------|
| N |
| Expected N |
| N / Row Total |
| N / Col Total |
| N / Table Total |
|-------------------------|
==================================
.$corte
.$country 0 1 Total
----------------------------------
br 164 56 220
181.9 38.1
0.745 0.255 0.370
0.333 0.544
0.276 0.094
----------------------------------
sp 328 47 375
310.1 64.9
0.875 0.125 0.630
0.667 0.456
0.551 0.079
----------------------------------
Total 492 103 595
0.827 0.173
==================================
Statistics for All Table Factors
Pearson's Chi-squared test
------------------------------------------------------------
Chi^2 = 16.17242 d.f. = 1 p = 5.78e-05
Pearson's Chi-squared test with Yates' continuity correction
------------------------------------------------------------
Chi^2 = 15.28234 d.f. = 1 p = 9.26e-05
dados %>%
#filter(sex_female == "female") %>%
mutate(corte =
if_else(bsq_soma_c >= 110, 1,0)) %>%
{epitools::oddsratio(.$country,.$corte, rev = "r")}
$data
Outcome
Predictor 0 1 Total
sp 328 47 375
br 164 56 220
Total 492 103 595
$measure
odds ratio with 95% C.I.
Predictor estimate lower upper
sp 1.000000 NA NA
br 2.377962 1.546147 3.673224
$p.value
two-sided
Predictor midp.exact fisher.exact chi.square
sp NA NA NA
br 8.17943e-05 7.985849e-05 5.78299e-05
$correction
[1] FALSE
attr(,"method")
[1] "median-unbiased estimate & mid-p exact CI"
dados %>%
mutate(corte =
if_else(eat_soma_c >= 21, 1,0)) %>%
count(corte) %>%
mutate(freq = n / sum(n))
dados %>%
mutate(corte =
if_else(eat_soma_c >= 21, 1,0)) %>%
{descr::CrossTable(.$country,.$corte, prop.chisq = F,
chisq = T,
expected = T)}
Cell Contents
|-------------------------|
| N |
| Expected N |
| N / Row Total |
| N / Col Total |
| N / Table Total |
|-------------------------|
==================================
.$corte
.$country 0 1 Total
----------------------------------
br 160 60 220
189.3 30.7
0.727 0.273 0.370
0.312 0.723
0.269 0.101
----------------------------------
sp 352 23 375
322.7 52.3
0.939 0.061 0.630
0.688 0.277
0.592 0.039
----------------------------------
Total 512 83 595
0.861 0.139
==================================
Statistics for All Table Factors
Pearson's Chi-squared test
------------------------------------------------------------
Chi^2 = 51.6188 d.f. = 1 p = 6.74e-13
Pearson's Chi-squared test with Yates' continuity correction
------------------------------------------------------------
Chi^2 = 49.87275 d.f. = 1 p = 1.64e-12
dados %>%
#filter(sex_female == "female") %>%
mutate(corte =
if_else(eat_soma_c >= 21, 1,0)) %>%
{epitools::oddsratio(.$country,.$corte, rev = "r")}
$data
Outcome
Predictor 0 1 Total
sp 352 23 375
br 160 60 220
Total 512 83 595
$measure
odds ratio with 95% C.I.
Predictor estimate lower upper
sp 1.00000 NA NA
br 5.69834 3.442265 9.731348
$p.value
two-sided
Predictor midp.exact fisher.exact chi.square
sp NA NA NA
br 2.102096e-12 2.098019e-12 6.739306e-13
$correction
[1] FALSE
attr(,"method")
[1] "median-unbiased estimate & mid-p exact CI"
# Cross-product (sample) odds ratio from the 2x2 table above; close to the
# median-unbiased estimate reported by epitools (5.69834).
(60*352)/(23*160)
[1] 5.73913
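A hedged cross-check of the interval reported by epitools: the Wald 95% CI for the sample odds ratio, exp(log(OR) ± 1.96·SE) with SE = sqrt(1/a + 1/b + 1/c + 1/d).
or_manual <- (60 * 352) / (23 * 160)
se_log_or <- sqrt(1/60 + 1/352 + 1/23 + 1/160)
exp(log(or_manual) + c(-1.96, 1.96) * se_log_or)  # Wald 95% CI; close to the mid-p interval above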
Does someone who is at risk on the EAT also score high on the BSQ?
dados %>%
#filter(sex_female == "female") %>%
mutate(corte_eat =
if_else(eat_soma_c >= 21, 1,0)) %>%
{t.test(bsq_soma_c ~ corte_eat, data =.)}
Welch Two Sample t-test
data: bsq_soma_c by corte_eat
t = -13.739, df = 97.745, p-value < 2.2e-16
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
-65.54413 -48.99917
sample estimates:
mean in group 0 mean in group 1
65.71439 122.98604
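A hedged follow-up (not in the original log): Cohen's d for the BSQ difference between the two EAT risk groups, using the pooled SD.
d_dat <- dados %>%
  mutate(corte_eat = if_else(eat_soma_c >= 21, 1, 0)) %>%
  filter(!is.na(bsq_soma_c), !is.na(corte_eat))
m <- tapply(d_dat$bsq_soma_c, d_dat$corte_eat, mean)
s <- tapply(d_dat$bsq_soma_c, d_dat$corte_eat, sd)
n <- tapply(d_dat$bsq_soma_c, d_dat$corte_eat, length)
s_pooled <- sqrt(((n["0"] - 1) * s["0"]^2 + (n["1"] - 1) * s["1"]^2) / (sum(n) - 2))
unname((m["1"] - m["0"]) / s_pooled)  # Cohen's d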
dados %>%
#filter(sex_female == "female") %>%
mutate(corte_bsq =
if_else(bsq_soma_c >= 110, 1,0)) %>%
{t.test(bsq_soma_c ~ corte_bsq, data =.)}
Welch Two Sample t-test
data: bsq_soma_c by corte_bsq
t = -30.262, df = 134.97, p-value < 2.2e-16
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
-79.65602 -69.88326
sample estimates:
mean in group 0 mean in group 1
60.76023 135.52987
dados%>%
filter(!is.na(sex_female)) %>%
#filter(imc < 24.9) %>%
mutate(corte_bsq =
if_else(bsq_soma_c >= 110, 1,0)) %>%
mutate(corte_eat =
if_else(eat_soma >= 21, 1, 0)) %>% # note: this uses eat_soma rather than the eat_soma_c used elsewhere
mutate(quadrantes = case_when(
corte_bsq == 1 & corte_eat == 1 ~ "risco",
corte_bsq == 0 & corte_eat == 0 ~ "ok",
corte_bsq == 1 & corte_eat == 0 ~ "risco_bsq",
corte_bsq == 0 & corte_eat == 1 ~ "risco_eat",
)) %>%
group_by(quadrantes, country,sex_female) %>%
summarise(n=n()) %>%
mutate(prop = n/sum(n)) %>%
mutate(prop = round(prop*100,1)) %>%
select(-n) %>%
pivot_wider(names_from = "quadrantes", values_from = "prop")
`summarise()` has grouped output by 'quadrantes', 'country'. You can override using the `.groups` argument.
dados %>%
select(country, sex_female, peso_desejado, peso_atual) %>% # select target variables
rename(Sex = "sex_female") %>% # rename for readability
mutate(Sex = str_to_sentence(Sex)) %>% # sentence case
na.omit() %>% # drop missings
mutate(razao = peso_desejado/peso_atual) %>% # desired/current weight ratio
group_by(Sex) %>% # group by sex
mutate(percrank = rank(razao)/length(razao)) %>% # percentile rank
arrange(Sex, razao) %>%
ggplot(.,
aes(x = razao, y = percrank, colour = Sex, linetype = country)) +
geom_point(size=1) +
geom_line(size = 2, alpha = 0.5) +
geom_vline(xintercept = 1, linetype = "dashed") +
#geom_smooth(level = 0.99) +
labs(x = "Ratio", y = "Percentage") +
theme_bw()
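A hedged companion summary (not in the original log): the median desired-to-current weight ratio by country and sex, to read alongside the percentile curves above.
dados %>%
  select(country, sex_female, peso_desejado, peso_atual) %>%
  na.omit() %>%
  mutate(razao = peso_desejado / peso_atual) %>%
  group_by(country, sex_female) %>%
  summarise(mediana_razao = median(razao), .groups = "drop")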
dados %>%
filter(country == "br") %>%
filter(!is.na(sex_female), !is.na(weight_status)) %>%
tabyl(sex_female, weight_status) %>%
adorn_totals(c("row", "col")) %>%
adorn_percentages("row") %>%
adorn_pct_formatting(rounding = "half up", digits = 0) %>%
adorn_ns() %>%
kable()
dados %>%
filter(country == "sp") %>%
filter(!is.na(sex_female), !is.na(weight_status)) %>%
tabyl(sex_female, weight_status) %>%
adorn_totals(c("row", "col")) %>%
adorn_percentages("row") %>%
adorn_pct_formatting(rounding = "half up", digits = 0) %>%
adorn_ns() %>%
kable()
dados %>%
filter(country == "br") %>%
filter(!is.na(sex_female), !is.na(weight_status)) %>%
{descr::crosstab(.$sex_female, .$weight_status, chisq = T, plot = F)}
Cell Contents
|-------------------------|
| Count |
|-------------------------|
===============================================
.$weight_status
.$sex_female ganhar igual perder Total
-----------------------------------------------
male 25 21 37 83
-----------------------------------------------
female 9 18 94 121
-----------------------------------------------
Total 34 39 131 204
===============================================
Statistics for All Table Factors
Pearson's Chi-squared test
------------------------------------------------------------
Chi^2 = 26.39928 d.f. = 2 p = 1.85e-06
Minimum expected frequency: 13.83333
dados %>%
filter(sex_female == "female") %>%
group_by(weight_status) %>%
summarise(mean(imc), mean(peso_atual),mean(altura), n())
dados %>%
filter(!is.na(faz_esporte)) %>%
ggplot(., aes(x= imc, y = bsq_soma_c, fill = faz_esporte)) +
geom_jitter() +
geom_smooth(method = "lm")
mod_bsq_imc <- lm(bsq_soma_c ~ imc + faz_esporte + country + sex_female, data = dados)
apaTables::apa.aov.table(mod_bsq_imc)
olsrr::ols_regress(mod_bsq_imc)
mod_eat_imc <- lm(eat_soma_c ~ imc + faz_esporte + country + sex_female, data = dados)
apaTables::apa.aov.table(mod_eat_imc)
ANOVA results using eat_soma_c as the dependent variable
Note: Values in square brackets indicate the bounds of the 90% confidence interval for partial eta-squared
olsrr::ols_regress(mod_eat_imc)
#install.packages("NetworkComparisonTest")
library(NetworkComparisonTest)
package 'NetworkComparisonTest' was built under R version 4.0.5
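data1 and data2 are never created in this log; a minimal sketch, assuming the network comparison is between the two countries' EAT item scores (the actual inputs may differ):
data1 <- dados %>% filter(country == "br") %>% select(eat1_c:eat26_c) %>% na.omit()
data2 <- dados %>% filter(country == "sp") %>% select(eat1_c:eat26_c) %>% na.omit()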
netw <- NCT(data1, data2, make.positive.definite = TRUE, test.edges = FALSE, edges = "all")
plot(netw,what="network")
dados %>%
  na.omit() %>%
  write.csv("dados_pesquisa_puc_no_missing.csv", row.names = FALSE)
plot(Layout)
Error in as.double(y) :
cannot coerce type 'environment' to vector of type 'double'
# 'Layout' here is not a numeric layout matrix (it resolves to an environment), so plot() fails;
# if a shared node layout for the two networks was intended, it could be built with
# qgraph::averageLayout() and passed to qgraph(..., layout = ...) rather than plotted on its own.
centralityPlot(GGM = list(female_br = graph_female_br, female_sp = graph_female_sp))
Note: z-scores are shown on x-axis rather than raw centrality indices.
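graph_female_br and graph_female_sp are not estimated anywhere in this log; a hypothetical construction with a regularized partial-correlation network (qgraph::EBICglasso) on the female subsample of each country, which may differ from the original estimation, would be:
dat_fem_br <- dados %>%
  filter(country == "br", sex_female == "female") %>%
  select(eat1_c:eat26_c) %>%
  na.omit()
graph_female_br <- qgraph::qgraph(
  qgraph::EBICglasso(cor(dat_fem_br), n = nrow(dat_fem_br)),
  layout = "spring"
)
# graph_female_sp: the same, filtering country == "sp"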
dados %>%
  filter(sex_female == "female") %>% # keep women only
  select(country, eat1_c:eat26_c) %>% # keep country and the EAT item scores
  pivot_longer(cols = -country, values_to = 'value1') %>% # reshape to long
  group_by(country, name) %>% # group by country and item
  summarise_at(vars(value1), list(mean = mean, sd = sd)) %>% # per-item mean and SD
  pivot_wider(names_from = country, values_from = mean:sd) %>% # one row per item
  mutate(cohen = (mean_br - mean_sp)/((sd_br + sd_br)/2)) %>% # note: (sd_br + sd_sp)/2 was probably intended here
  mutate(cohen_int = factor(case_when(
    cohen < abs(0.1) ~ "neg", # note: abs(cohen) was probably intended in these thresholds
    cohen < abs(0.3) ~ "small",
    cohen < abs(0.5) ~ "med",
    TRUE ~ "strong"), levels = c("neg", "small", "med", "strong"))) %>%
  arrange(desc(cohen)) %>%
  {length(.$cohen_int[.$cohen_int == "strong"])} # number of items with a "strong" difference
[1] 11
dados %>%
  filter(sex_female == "female") %>% # keep women only
  select(country, bsq1_c:bsq34_c) %>% # keep country and the BSQ item scores
  pivot_longer(cols = -country, values_to = 'value1') %>% # reshape to long
  group_by(country, name) %>% # group by country and item
  summarise_at(vars(value1), list(mean = mean, sd = sd)) %>% # per-item mean and SD
  pivot_wider(names_from = country, values_from = mean:sd) %>% # one row per item
  mutate(cohen = (mean_br - mean_sp)/((sd_br + sd_br)/2)) %>% # note: (sd_br + sd_sp)/2 was probably intended here
  mutate(cohen_int = factor(case_when(
    cohen < abs(0.1) ~ "neg", # note: abs(cohen) was probably intended in these thresholds
    cohen < abs(0.3) ~ "small",
    cohen < abs(0.5) ~ "med",
    TRUE ~ "strong"), levels = c("neg", "small", "med", "strong"))) %>% # Cohen's d interpretation
  select(-contains("sd")) %>%
  pivot_longer(-c(name, cohen, cohen_int), names_to = "pais") %>%
  mutate(name = factor(name)) %>%
  mutate(name = fct_reorder(name, cohen, .desc = TRUE)) %>% # order items by Cohen's d
  mutate(pais = if_else(pais == "mean_br", "BR", "SP")) %>% # relabel country
  ggplot(., aes(x = name, y = value, fill = pais, group = pais)) +
  geom_col(position = "dodge") + # geom_col has no 'stat' argument
  #geom_vline(aes(xintercept = 11), linetype = "dashed", colour = "blue", size = 0.7) +
  labs(x = "Itens", y = "Resultado", fill = "País") +
  theme_bw() +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1)) +
  #facet_grid(. ~ cohen_int) +
  facet_wrap(~cohen_int) +
  theme(text = element_text(size = 15))
dados %>%
  filter(sex_female == "female") %>% # keep women only
  select(country, eat1_c:bsq34_c) %>% # all item scores assumed here; the original 'bsq:eat26_c' names a column that does not exist
  pivot_longer(cols = -country, values_to = 'value1') %>% # reshape to long
  group_by(country, name) %>% # group by country and item
  summarise_at(vars(value1), list(mean = mean, sd = sd)) %>% # per-item mean and SD
  pivot_wider(names_from = country, values_from = mean:sd) %>% # one row per item
  mutate(cohen = (mean_br - mean_sp)/((sd_br + sd_br)/2)) %>% # note: (sd_br + sd_sp)/2 was probably intended here
  mutate(cohen_int = factor(case_when(
    cohen < abs(0.1) ~ "neg", # note: abs(cohen) was probably intended in these thresholds
    cohen < abs(0.3) ~ "small",
    cohen < abs(0.5) ~ "med",
    TRUE ~ "strong"), levels = c("neg", "small", "med", "strong"))) %>% # Cohen's d interpretation
  select(-contains("sd")) %>%
  pivot_longer(-c(name, cohen, cohen_int), names_to = "pais") %>%
  mutate(name = factor(name)) %>%
  mutate(name = fct_reorder(name, cohen, .desc = TRUE)) %>% # order items by Cohen's d
  mutate(pais = if_else(pais == "mean_br", "BR", "SP")) %>% # relabel country
  ggplot(., aes(x = name, y = value, fill = pais, group = pais)) +
  geom_col(position = "dodge") + # geom_col has no 'stat' argument
  #geom_vline(aes(xintercept = 11), linetype = "dashed", colour = "blue", size = 0.7) +
  labs(x = "Itens", y = "Resultado", fill = "País") +
  theme_bw() +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1)) +
  #facet_grid(. ~ cohen_int) +
  #facet_wrap(~cohen_int) +
  theme(text = element_text(size = 15))
library(likert)
Loading required package: xtable
Attaching package: 'likert'
The following object is masked from 'package:dplyr':
recode
dados_eat_likert <- dados %>%
  select(sex_female, eat1_c:eat26_c) %>%
  as.data.frame() %>%
  na.omit() %>%
  mutate_at(vars(-sex_female), ~factor(., levels = 0:3)) # items as 0-3 factors for likert()
plot(likert(dados_eat_likert[, 2:5], grouping = dados_eat_likert$sex_female)) # first four EAT items, grouped by sex
library(patchwork)
p1 <- dados %>% #get dataset
filter(country == "br") %>%
select(sex_female, eat1_c:eat26_c) %>%
as.data.frame() %>% #get dataset
na.omit() %>%
mutate_at(vars(-sex_female), ~factor(., levels=0:3)) %>%
{likert(.[,2:10], grouping=.[,1])} %>%
plot() +
ggthemes::theme_few() +
ylab("Br") +
theme(legend.position = "none")
p2 <- dados %>% #get dataset
filter(country == "sp") %>%
select(sex_female, eat1_c:eat26_c) %>%
as.data.frame() %>% #get dataset
na.omit() %>%
mutate_at(vars(-sex_female), ~factor(., levels=0:3)) %>%
{likert(.[,2:10], grouping=.[,1])} %>%
plot() +
ylab("Sp") +
ggthemes::theme_few()
p1+p2
dados %>%
select(country, sex_female, eat1_c:eat26_c) %>%
na.omit() %>%
pivot_longer(cols = -c(country, sex_female),
names_to = "item", values_to = "categoria") %>%
group_by(item, country, sex_female, categoria) %>%
summarise(N = n()) %>%
mutate(Pct = N / sum(N)) %>%
mutate(categoria = factor(round(categoria,0))) %>%
ggplot(aes(x = item, y = Pct, fill = categoria)) +
geom_bar(position="fill", stat="identity")+
scale_y_continuous(labels = scales::percent) +
#geom_text(aes(x=item, y=Pct, group=categoria,label = paste(Pct*100, "%"))) +
coord_flip() +
facet_wrap(~sex_female + country) +
theme_bw() +
theme(legend.position = "bottom")
`summarise()` has grouped output by 'item', 'country', 'sex_female'. You can override using the `.groups` argument.
#Hx: Sex and country differences
apaTables::apa.aov.table(mod_sex_country_bsq)
ANOVA results using bsq_soma_c as the dependent variable
Note: Values in square brackets indicate the bounds of the 90% confidence interval for partial eta-squared
dados %>%
filter(country == "sp") %>%
select(eat_soma_c, bsq_soma_c) %>%
cor()
eat_soma_c bsq_soma_c
eat_soma_c 1.0000000 0.6778482
bsq_soma_c 0.6778482 1.0000000
dados %>%
filter(country == "br") %>%
select(eat_soma_c, bsq_soma_c) %>%
cor()
eat_soma_c bsq_soma_c
eat_soma_c 1.000000 0.694056
bsq_soma_c 0.694056 1.000000
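A hedged comparison of the two EAT-BSQ correlations (not in the original log): Fisher's r-to-z test for independent correlations, assuming the complete-case group sizes from the cross-tabulations above (sp = 375, br = 220).
r_sp <- 0.6778482; r_br <- 0.694056
z_stat <- (atanh(r_br) - atanh(r_sp)) / sqrt(1 / (220 - 3) + 1 / (375 - 3))
2 * pnorm(-abs(z_stat))  # two-sided p-value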
apaTables::apa.aov.table(mod_sex_country_ead)
ANOVA results using eat_soma_c as the dependent variable
Note: Values in square brackets indicate the bounds of the 90% confidence interval for partial eta-squared
apaTables::apa.aov.table(mod_sex_country_eat_dieta)
ANOVA results using eat_dieta as the dependent variable
Note: Values in square brackets indicate the bounds of the 90% confidence interval for partial eta-squared
apaTables::apa.aov.table(mod_sex_country_eat_bulimia)
ANOVA results using eat_bulimia as the dependent variable
Note: Values in square brackets indicate the bounds of the 90% confidence interval for partial eta-squared
mod_sex_country_eat_controle <- lm(eat_oral ~ sex_female * country, dados) # 'eat_controle' does not exist; eat_oral (oral-control subscale) matches the table below
apaTables::apa.aov.table(mod_sex_country_eat_controle)
ANOVA results using eat_oral as the dependent variable
Note: Values in square brackets indicate the bounds of the 90% confidence interval for partial eta-squared
##2: Weight differences