Objective: fit linear mixed-effects models to Brazilian state-level panel data (2005-2015), handling missing values with multiple imputation and pooling the results across the imputed datasets.

require(DT)         # for the datatable function
require(tidyverse)
require(mice)       # for multiple imputation
require(broom.mixed)
require(broom)
require(lme4)
require(readxl)
require(tidyr)
require(factoextra) # for PCA
require(redres)     # for the residuals
require(geoR)       # for the two-parameter Box-Cox
require(corrplot)

Data spreadsheet:

dados = read_excel("dados_renato.xlsx", sheet = 1)
names(dados) = c("Ano","UF","Cod.UF","tx.latrc","tx.pres","gini.ibge",
                 "perc.jov.1524","perc.hom","pbf","densidade.urbana1",
                 "densidade.urbana2","taxa.casamentos","Taxa.desligamentos",
                 "raz.2020","raz.1040")
head(dados)  # for an interactive table: DT::datatable(dados, options = list(pageLength = 5))
## # A tibble: 6 x 15
##     Ano UF    Cod.UF tx.latrc tx.pres gini.ibge perc.jov.1524 perc.hom   pbf
##   <dbl> <chr>  <dbl>    <dbl>   <dbl>     <dbl>         <dbl>    <dbl> <dbl>
## 1  2005 Rond~      1    1.30     298.     0.544          19.7     49.6  62.7
## 2  2006 Rond~      1    1.60     272.     0.537          20.2     50.7  84.6
## 3  2007 Rond~      1    1.51     354.     0.472          19.7     49.7 110. 
## 4  2008 Rond~      1    1.14     400.     0.478          19.1     50.2 119. 
## 5  2009 Rond~      1    0.997    465.     0.49           19.6     49.9 139. 
## 6  2010 Rond~      1    1.67     476.    NA              NA       NA   145. 
## # ... with 6 more variables: densidade.urbana1 <dbl>, densidade.urbana2 <dbl>,
## #   taxa.casamentos <dbl>, Taxa.desligamentos <dbl>, raz.2020 <dbl>,
## #   raz.1040 <dbl>

Variables chosen to enter the model:

Checking the variables for missing values:

variaveis = c("tx.latrc","Ano","UF","Cod.UF","perc.jov.1524","pbf","gini.ibge","Taxa.desligamentos","perc.hom","densidade.urbana1")
dados1 = dados %>%
  select(variaveis)
## Note: Using an external vector in selections is ambiguous.
## i Use `all_of(variaveis)` instead of `variaveis` to silence this message.
## i See <https://tidyselect.r-lib.org/reference/faq-external-vector.html>.
## This message is displayed once per session.
summary(dados1)
##     tx.latrc            Ano            UF                Cod.UF  
##  Min.   :0.03206   Min.   :2005   Length:297         Min.   : 1  
##  1st Qu.:0.64466   1st Qu.:2007   Class :character   1st Qu.: 7  
##  Median :0.94540   Median :2010   Mode  :character   Median :14  
##  Mean   :1.09195   Mean   :2010                      Mean   :14  
##  3rd Qu.:1.35786   3rd Qu.:2013                      3rd Qu.:21  
##  Max.   :5.42341   Max.   :2015                      Max.   :27  
##  NA's   :16                                                      
##  perc.jov.1524         pbf           gini.ibge      Taxa.desligamentos
##  Min.   : 9.711   Min.   : 22.04   Min.   :0.4190   Min.   :0.01196   
##  1st Qu.:16.774   1st Qu.: 79.91   1st Qu.:0.4783   1st Qu.:0.03625   
##  Median :18.242   Median :137.69   Median :0.5025   Median :0.05304   
##  Mean   :18.076   Mean   :159.97   Mean   :0.5055   Mean   :0.06777   
##  3rd Qu.:19.500   3rd Qu.:231.84   3rd Qu.:0.5272   3rd Qu.:0.09978   
##  Max.   :23.500   Max.   :423.79   Max.   :0.6150   Max.   :0.17911   
##  NA's   :27                        NA's   :27                         
##     perc.hom     densidade.urbana1
##  Min.   :46.80   Min.   : 34.29   
##  1st Qu.:48.40   1st Qu.: 45.06   
##  Median :49.00   Median : 63.79   
##  Mean   :49.06   Mean   : 73.50   
##  3rd Qu.:49.80   3rd Qu.: 97.48   
##  Max.   :52.40   Max.   :160.29   
##  NA's   :27
summary(dados)
##       Ano            UF                Cod.UF      tx.latrc      
##  Min.   :2005   Length:297         Min.   : 1   Min.   :0.03206  
##  1st Qu.:2007   Class :character   1st Qu.: 7   1st Qu.:0.64466  
##  Median :2010   Mode  :character   Median :14   Median :0.94540  
##  Mean   :2010                      Mean   :14   Mean   :1.09195  
##  3rd Qu.:2013                      3rd Qu.:21   3rd Qu.:1.35786  
##  Max.   :2015                      Max.   :27   Max.   :5.42341  
##                                                 NA's   :16       
##     tx.pres         gini.ibge      perc.jov.1524       perc.hom    
##  Min.   : 51.71   Min.   :0.4190   Min.   : 9.711   Min.   :46.80  
##  1st Qu.:156.22   1st Qu.:0.4783   1st Qu.:16.774   1st Qu.:48.40  
##  Median :228.64   Median :0.5025   Median :18.242   Median :49.00  
##  Mean   :252.51   Mean   :0.5055   Mean   :18.076   Mean   :49.06  
##  3rd Qu.:337.17   3rd Qu.:0.5272   3rd Qu.:19.500   3rd Qu.:49.80  
##  Max.   :634.53   Max.   :0.6150   Max.   :23.500   Max.   :52.40  
##  NA's   :2        NA's   :27       NA's   :27       NA's   :27     
##       pbf         densidade.urbana1 densidade.urbana2 taxa.casamentos   
##  Min.   : 22.04   Min.   : 34.29    Min.   : 26.95    Min.   :0.001847  
##  1st Qu.: 79.91   1st Qu.: 45.06    1st Qu.: 38.80    1st Qu.:0.003916  
##  Median :137.69   Median : 63.79    Median : 53.12    Median :0.004859  
##  Mean   :159.97   Mean   : 73.50    Mean   : 57.94    Mean   :0.004876  
##  3rd Qu.:231.84   3rd Qu.: 97.48    3rd Qu.: 75.40    3rd Qu.:0.005605  
##  Max.   :423.79   Max.   :160.29    Max.   :131.26    Max.   :0.008422  
##                                     NA's   :27                          
##  Taxa.desligamentos    raz.2020         raz.1040     
##  Min.   :0.01196    Min.   : 9.034   Min.   : 8.349  
##  1st Qu.:0.03625    1st Qu.:14.177   1st Qu.:13.053  
##  Median :0.05304    Median :16.047   Median :14.912  
##  Mean   :0.06777    Mean   :16.825   Mean   :15.515  
##  3rd Qu.:0.09978    3rd Qu.:18.717   3rd Qu.:17.426  
##  Max.   :0.17911    Max.   :32.329   Max.   :27.742  
##                     NA's   :54       NA's   :54

Computing the required number of imputations:

[1] Rubin, D. B. (1987). Multiple Imputation for Nonresponse in Surveys. New York: John Wiley and Sons.
[2] Zaninotto, P.; Sacker, A. (2017). Missing Data in Longitudinal Surveys: A Comparison of Performance of Modern Techniques. Journal of Modern Applied Statistical Methods. https://digitalcommons.wayne.edu/cgi/viewcontent.cgi?article=2384&context=jmasm
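
Following Rubin's relative-efficiency argument [1], an analysis based on m imputed datasets has efficiency RE = (1 + \lambda/m)^{-1} relative to infinitely many imputations, where \lambda is the fraction of missing information (approximated below by the fraction of incomplete rows). Setting RE = a and solving for m gives

$$ m = \frac{\lambda}{1/a - 1} = \frac{a\,\lambda}{1 - a}, $$

which is what the code below computes with a = 0.99.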

# fraction of incomplete rows, used as a proxy for the fraction of missing information
lambda = 1 - nrow(dados1[complete.cases(dados1), ]) / nrow(dados1)
a = 0.99  # target relative efficiency
N = round(lambda / (1 / a - 1), 0)  # Rubin's rule: m = lambda / (1/a - 1)
N
## [1] 14

Reshaping to a wide table for imputation:

dados1_wide = dados1 %>%
  pivot_wider(
    id_cols = Cod.UF,
    names_from = Ano,
    names_glue = "{.value}_{Ano}",
    values_from = c(tx.latrc, perc.jov.1524, pbf, gini.ibge,
                    Taxa.desligamentos, perc.hom, densidade.urbana1)
  )
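
A quick sanity check on the reshape (27 states; 7 variables over the 11 years 2005-2015, plus Cod.UF):

dim(dados1_wide)  # expected: 27 rows, 1 + 7 * 11 = 78 columns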

Running the imputation:

imp1 <- mice(dados1_wide, m = N, print = FALSE)
## Warning: Number of logged events: 473
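
mice reported logged events, which typically flag constant or collinear predictors dropped during imputation. They are stored in the returned mids object and can be inspected:

head(imp1$loggedEvents)  # which variables/predictors triggered the 473 events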

Extracting the completed (imputed) datasets:

imp_comp1 <- mice::complete(imp1, "all")
a1=imp_comp1[[1]]

Function to pivot the imputed datasets back:

pivotar = function(dataset) {
  dataset %>%
    # back to long format: split the "variable_year" column names
    pivot_longer(
      !Cod.UF,
      names_to = c("Variavel", "Ano"),
      names_sep = "_"
    ) %>%
    # one column per variable again
    pivot_wider(
      names_from = Variavel,
      values_from = value
    ) %>%
    mutate(
      Ano = as.numeric(Ano),
      Cod.UF = as.factor(Cod.UF)
    )
}
a1 = pivotar(imp_comp1[[1]])

Pivoting the imputed datasets back to long format:

imp_comp2 = imp_comp1 %>%
  lapply(pivotar)
a1 = imp_comp2[[1]]

Model 1: a model with a random effect on Ano (each state has its own Ano coefficient). The significant variables can be read from the pooled summary below:

formula1 =  "tx.latrc ~ Ano  + perc.jov.1524 + pbf + gini.ibge + Taxa.desligamentos + perc.hom + densidade.urbana1 + (Ano | Cod.UF)"

modelo_1 <- lapply(imp_comp2, lmer, formula =formula1)
## Warning: Some predictor variables are on very different scales: consider
## rescaling
## boundary (singular) fit: see ?isSingular
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## unable to evaluate scaled gradient
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge: degenerate Hessian with 1 negative eigenvalues
## (similar warnings were emitted for each of the 14 fits: the rescaling
## warning for all 14, singular fits for 12, convergence failures for 2)
summary(pool(modelo_1), conf.int = TRUE)
##                 term      estimate    std.error     statistic       df
## 1        (Intercept) -1.443729e+02 5.303133e+01 -2.7224073740 247.5757
## 2                Ano  7.102654e-02 2.617048e-02  2.7139943556 249.7666
## 3      perc.jov.1524 -5.721289e-03 2.965749e-02 -0.1929121258 254.8117
## 4                pbf -1.536396e-03 9.880897e-04 -1.5549157134 247.2582
## 5          gini.ibge -1.045922e-03 1.621924e+00 -0.0006448652 231.6378
## 6 Taxa.desligamentos -7.197367e+00 2.858588e+00 -2.5178053286 251.1250
## 7           perc.hom  7.104641e-02 6.418192e-02  1.1069536235 135.3406
## 8  densidade.urbana1  8.920972e-04 3.095820e-03  0.2881618963 226.6340
##       p.value         2.5 %        97.5 %
## 1 0.006941541 -2.488230e+02 -3.992279e+01
## 2 0.007110814  1.948358e-02  1.225695e-01
## 3 0.847181458 -6.412629e-02  5.268372e-02
## 4 0.121245156 -3.482542e-03  4.097498e-04
## 5 0.999486027 -3.196654e+00  3.194563e+00
## 6 0.012431790 -1.282723e+01 -1.567506e+00
## 7 0.270277561 -5.588279e-02  1.979756e-01
## 8 0.773486173 -5.208174e-03  6.992368e-03

At the 5% level, apart from the intercept, only Ano (p ≈ 0.0071) and Taxa.desligamentos (p ≈ 0.0124) are significant.

Pending issues and their resolution:

The AIC of model 1 is taken as the mean of the AICs of the N models fitted to the N imputed datasets.

aic_modelo1 = lapply(modelo_1, AIC) %>%  # AIC of each of the N fitted models
  unlist() %>%
  mean()                                 # averaged, as described above
aic_modelo1
## [1] 498.566

Conditional residuals:

rc_resids <- compute_redres(modelo_1[[1]])
## Loading required namespace: testthat
plot(rc_resids, main = "conditional residuals versus index")

plot_resqq(modelo_1[[1]])

Alternative: apply a Box-Cox transformation to the first dataset to determine a suitable transformation. Here an ordinary (non-mixed) linear model was used, with Ano as the only covariate.

bc2 <- boxcoxfit(imp_comp2[[1]]$tx.latrc, imp_comp2[[1]]$Ano, lambda2 = TRUE)  # two-parameter Box-Cox fit
l1 <- bc2$lambda[1]  # power parameter (lambda)
l2 <- bc2$lambda[2]  # shift parameter (lambda2)
l1
##    lambda 
## -1.911845
l2
## lambda2 
## 3.00257
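
For reference, the two-parameter Box-Cox family fitted by boxcoxfit is

$$ y^{(\lambda_1, \lambda_2)} = \begin{cases} \dfrac{(y + \lambda_2)^{\lambda_1} - 1}{\lambda_1}, & \lambda_1 \neq 0, \\ \log(y + \lambda_2), & \lambda_1 = 0, \end{cases} $$

so here the response is shifted by lambda2 ≈ 3.00 and then power-transformed with lambda1 ≈ -1.91.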

New estimation: creating the transformed variable in all datasets:

# two-parameter Box-Cox transform: ((y + l2)^l1 - 1) / l1
trans = function(data) {
  data %>%
    mutate(ytrans = ((tx.latrc + l2)^l1 - 1) / l1)
}

imp_comp3 = imp_comp2 %>%
  lapply(trans)
a1 = imp_comp3[[1]]
hist(a1$tx.latrc)

hist(a1$ytrans)
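
A small helper (not part of the original analysis; inv_trans is a hypothetical name) can invert the two-parameter transform, e.g. to map fitted values back to the tx.latrc scale:

inv_trans = function(y, lambda1 = l1, lambda2 = l2) {
  # inverse of ytrans = ((y + lambda2)^lambda1 - 1) / lambda1
  (lambda1 * y + 1)^(1 / lambda1) - lambda2
}
all.equal(inv_trans(a1$ytrans), a1$tx.latrc)  # round-trip check on the first dataset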

Fitting the model with the transformed response:

formula2 =  "ytrans ~ Ano  + perc.jov.1524 + pbf + gini.ibge + Taxa.desligamentos + perc.hom + densidade.urbana1 + (Ano | Cod.UF)"

modelo_2 <- lapply(imp_comp3, lmer, formula =formula2)
## Warning: Some predictor variables are on very different scales: consider
## rescaling
## boundary (singular) fit: see ?isSingular
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## unable to evaluate scaled gradient
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge: degenerate Hessian with 1 negative eigenvalues
## (similar warnings were emitted for each of the 14 fits: the rescaling
## warning for all 14, singular fits for 12, convergence failures for 2)
summary(pool(modelo_2), conf.int = TRUE)
##                 term      estimate    std.error  statistic       df     p.value
## 1        (Intercept) -1.730115e+00 7.526430e-01 -2.2987194 250.7797 0.022343793
## 2                Ano  1.091214e-03 3.718509e-04  2.9345466 251.9334 0.003648603
## 3      perc.jov.1524 -6.501304e-05 4.229665e-04 -0.1537073 253.6830 0.877962726
## 4                pbf -1.387150e-05 1.403960e-05 -0.9880273 249.5522 0.324096267
## 5          gini.ibge  7.058729e-03 2.324246e-02  0.3036998 234.3480 0.761626107
## 6 Taxa.desligamentos -9.244382e-02 4.138488e-02 -2.2337581 251.4782 0.026378717
## 7           perc.hom  5.712140e-04 8.967967e-04  0.6369492 170.7264 0.525011083
## 8  densidade.urbana1  9.824996e-06 4.526585e-05  0.2170510 239.4096 0.828353476
##           2.5 %        97.5 %
## 1 -3.2124220341 -2.478083e-01
## 2  0.0003588814  1.823546e-03
## 3 -0.0008979861  7.679600e-04
## 4 -0.0000415227  1.377970e-05
## 5 -0.0387321332  5.284959e-02
## 6 -0.1739489532 -1.093869e-02
## 7 -0.0011990238  2.341452e-03
## 8 -0.0000793452  9.899519e-05

As in model 1, apart from the intercept, only Ano (p ≈ 0.0036) and Taxa.desligamentos (p ≈ 0.0264) are significant at the 5% level.
aic_modelo2 = lapply(modelo_2, AIC) %>%  # mean AIC over the N fitted models
  unlist() %>%
  mean()
aic_modelo2
## [1] -1727.854

Note that this AIC is computed on the transformed (ytrans) scale, so it is not directly comparable with the AIC of model 1 without a Jacobian correction for the transformation.

Conditional residuals, Model 2 (transformed):

rc_resids <- compute_redres(modelo_2[[1]])
plot(rc_resids, main = "conditional residuals versus index")

plot_resqq(modelo_2[[1]])

shapiro.test(rc_resids)
## 
##  Shapiro-Wilk normality test
## 
## data:  rc_resids
## W = 0.99521, p-value = 0.5682

Analysis of the random effects:

random <- ranef(modelo_2[[1]])
aleatorio = random[["Cod.UF"]][["(Intercept)"]]
aleatorio2 = random[["Cod.UF"]][["Ano"]]
plot(aleatorio, main = "random effects versus index")
abline(h = 55, lty = 3)
abline(h = -55, lty = 3)
abline(h = 0, lty = 3, col = 4)

plot_ranef(modelo_2[[1]])

shapiro.test(aleatorio)
## 
##  Shapiro-Wilk normality test
## 
## data:  aleatorio
## W = 0.9718, p-value = 0.65
shapiro.test(aleatorio2)
## 
##  Shapiro-Wilk normality test
## 
## data:  aleatorio2
## W = 0.9718, p-value = 0.65

The intercept and slope effects give identical test statistics, which is consistent with the singular fits reported above: the two random effects are estimated as perfectly correlated.

Idea: Shapiro-Wilk tests of the conditional residuals for all imputed datasets:

rc_resids2 <- lapply(modelo_2, compute_redres)
shapiro <- t(sapply(rc_resids2, function(r) {
  s <- shapiro.test(r)
  round(c(W = unname(s$statistic), p.value = s$p.value), 5)
}))
shapiro
##          W p.value
## 1  0.99521  0.5682
## 2  0.99462  0.4591
## 3  0.99450  0.4387
## 4  0.99342  0.2840
## 5  0.99336  0.2767
## 6  0.99407  0.3720
## 7  0.99500  0.5278
## 8  0.99255  0.1945
## 9  0.99472  0.4769
## 10 0.99541  0.6069
## 11 0.99327  0.2661
## 12 0.99452  0.4423
## 13 0.99523  0.5717
## 14 0.99366  0.3143

All p-values exceed 0.05, so normality of the conditional residuals is not rejected for any of the 14 imputed datasets.

Correlogram by state:

l1
##    lambda 
## -1.911845
l2
## lambda2 
## 3.00257
variaveis = c("tx.latrc","Ano","UF","Cod.UF","perc.jov.1524","pbf","gini.ibge","Taxa.desligamentos","perc.hom","densidade.urbana1","tx.pres","taxa.casamentos")
dados1 = dados %>%
  select(variaveis)

dados1 = dados1 %>%
  mutate(ytrans = ((tx.latrc + l2)^l1 - 1) / l1)  # same two-parameter Box-Cox transform

# correlations can only be computed on complete cases (no missing values)
teste = dados1[complete.cases(dados1), ]

correlacao = list()
for (i in 1:27) {  # one correlogram per state
  estados = teste %>% filter(Cod.UF == i)
  titulo = estados$UF[1]  # state name for the plot title
  estados = estados %>%
    select(-c(UF, Cod.UF))
  correlacao[[i]] = cor(estados)
  corrplot.mixed(cor(estados), lower.col = "black", number.cex = 1, main = titulo)
}

Correlogram (all states pooled):

In this pooled correlogram (not broken down by state), the correlations can no longer be identified: they are washed out across the states.

teste = dados1[complete.cases(dados1), ]  # correlations require complete cases (no missing values)

teste2 = teste %>%
  select(-c(UF, Cod.UF))
corrplot.mixed(cor(teste2), lower.col = "black", number.cex = 1)

Now building a plot of the correlations to see how they behave across the states:

print("valores de correlacao para o estado de Rondonia")
## [1] "valores de correlacao para o estado de Rondonia"
correlacao[[1]]
##                      tx.latrc        Ano perc.jov.1524        pbf  gini.ibge
## tx.latrc            1.0000000 -0.8438022     0.7794218 -0.8274713  0.6971187
## Ano                -0.8438022  1.0000000    -0.9198848  0.8277164 -0.8352030
## perc.jov.1524       0.7794218 -0.9198848     1.0000000 -0.6008418  0.7682724
## pbf                -0.8274713  0.8277164    -0.6008418  1.0000000 -0.8370118
## gini.ibge           0.6971187 -0.8352030     0.7682724 -0.8370118  1.0000000
## Taxa.desligamentos -0.8979143  0.8444182    -0.6776513  0.9244673 -0.7287720
## perc.hom           -0.4085620  0.5630963    -0.3763502  0.5547607 -0.3176786
## densidade.urbana1   0.4794933 -0.5295523     0.3606178 -0.7659649  0.7811743
## tx.pres            -0.7134867  0.8883842    -0.8391680  0.7105695 -0.7872394
## taxa.casamentos    -0.8431928  0.9731741    -0.8761485  0.8356287 -0.7861810
## ytrans              0.9943131 -0.8459905     0.7754893 -0.8292293  0.7005267
##                    Taxa.desligamentos    perc.hom densidade.urbana1    tx.pres
## tx.latrc                   -0.8979143 -0.40856204        0.47949334 -0.7134867
## Ano                         0.8444182  0.56309628       -0.52955229  0.8883842
## perc.jov.1524              -0.6776513 -0.37635015        0.36061777 -0.8391680
## pbf                         0.9244673  0.55476070       -0.76596489  0.7105695
## gini.ibge                  -0.7287720 -0.31767861        0.78117427 -0.7872394
## Taxa.desligamentos          1.0000000  0.54114979       -0.64174972  0.6734873
## perc.hom                    0.5411498  1.00000000       -0.04930337  0.2703692
## densidade.urbana1          -0.6417497 -0.04930337        1.00000000 -0.6567274
## tx.pres                     0.6734873  0.27036924       -0.65672743  1.0000000
## taxa.casamentos             0.9014743  0.56153063       -0.53950413  0.8384147
## ytrans                     -0.8925181 -0.44944694        0.43883757 -0.6758367
##                    taxa.casamentos     ytrans
## tx.latrc                -0.8431928  0.9943131
## Ano                      0.9731741 -0.8459905
## perc.jov.1524           -0.8761485  0.7754893
## pbf                      0.8356287 -0.8292293
## gini.ibge               -0.7861810  0.7005267
## Taxa.desligamentos       0.9014743 -0.8925181
## perc.hom                 0.5615306 -0.4494469
## densidade.urbana1       -0.5395041  0.4388376
## tx.pres                  0.8384147 -0.6758367
## taxa.casamentos          1.0000000 -0.8402260
## ytrans                  -0.8402260  1.0000000
# build a data frame of correlations: ytrans against the other variables
correl_todos = c()
for (i in 1:27) {
  correl = data.frame(correlacao[[i]])
  correl1 = correl$ytrans
  names(correl1) = c("tx.latr","Ano","perc.jov.1524","pbf","gini.ibge",
                     "Taxa.desligamentos","perc.hom","densidade.urbana1",
                     "tx.pres","taxa.casamentos","ytrans")
  correl_todos = rbind(correl_todos, correl1)
}
print("valores de correlacao de ytrans com as outras variaveis")
## [1] "valores de correlacao de ytrans com as outras variaveis"
correl_todos
##           tx.latr         Ano perc.jov.1524         pbf   gini.ibge
## correl1 0.9943131 -0.84599050    0.77548926 -0.82922932  0.70052667
## correl1 0.9921504  0.72312425   -0.27882655  0.62113963 -0.45933279
## correl1 0.9897239  0.87640417   -0.34832305  0.78357266  0.35514439
## correl1 0.9869996  0.76439686   -0.46215435  0.56357486 -0.51650777
## correl1 0.9905846 -0.34660223    0.27265889 -0.37112707  0.11029840
## correl1 0.9814112  0.39673735   -0.80076673  0.41423878 -0.72046462
## correl1 0.9958380 -0.05329137    0.20262171  0.05300161 -0.31482471
## correl1 0.9969543  0.46521790   -0.39296750  0.31177242 -0.44363257
## correl1 0.9916645  0.85515612   -0.55365235  0.84446769 -0.75785890
## correl1 0.9956740 -0.65079214    0.72599098 -0.47504438  0.57177604
## correl1 0.9904150  0.22547550   -0.15475770  0.04425063 -0.49830181
## correl1 0.9958239 -0.17350317    0.06709496 -0.30880693  0.37589661
## correl1 0.9577949 -0.77551941    0.84007337 -0.88451632  0.89061234
## correl1 0.9879361  0.75109366   -0.52970894  0.82287790 -0.75967658
## correl1 0.9890593  0.93369181   -0.71350539  0.89894829 -0.23738907
## correl1 0.9988395  0.77418231   -0.58953791  0.70496696 -0.76070247
## correl1 0.9966946 -0.15940353    0.30953162 -0.13181995  0.27844959
## correl1 0.9916368  0.87879959   -0.84080832  0.89873569 -0.85132654
## correl1 0.9985521 -0.82855906    0.60695945 -0.82436232  0.87050959
## correl1 0.9989739  0.60425700   -0.43606886  0.60304631 -0.29141789
## correl1 0.9991751  0.15123639   -0.09424226 -0.18206776 -0.08243027
## correl1 0.9965860  0.81669634   -0.78393293  0.67836377 -0.75053473
## correl1 0.9982751  0.08444825    0.08916271 -0.04718006 -0.06772723
## correl1 0.9919347  0.76175893   -0.57516656  0.74414352 -0.48938034
## correl1 0.9978744  0.07649031    0.29385288 -0.11621506 -0.25869724
## correl1 0.9892110  0.68147638   -0.22611736  0.50462436 -0.66677980
## correl1 0.9893143 -0.73131424    0.34262768 -0.72306719  0.58995956
##         Taxa.desligamentos    perc.hom densidade.urbana1     tx.pres
## correl1        -0.89251809 -0.44944694       0.438837568 -0.67583671
## correl1         0.78567234  0.15257365       0.247098357  0.80162542
## correl1         0.51374135  0.31673296       0.663299166  0.88151004
## correl1         0.66691049 -0.48963854       0.818000206  0.60066937
## correl1        -0.34523095  0.13919226       0.165764840 -0.46301060
## correl1         0.26838983  0.81865395       0.053365090  0.38085846
## correl1        -0.19312501 -0.28632328       0.200789594  0.06443223
## correl1         0.31558571 -0.18445360      -0.506958077  0.57562910
## correl1         0.84157691 -0.04856339      -0.868712343  0.74544051
## correl1        -0.60069427 -0.37614813       0.606765597 -0.66916133
## correl1        -0.06635059 -0.52900010      -0.199472825  0.04094857
## correl1        -0.29067447  0.08294620       0.167853366  0.03575094
## correl1        -0.82531634  0.72129243       0.656260006 -0.69117528
## correl1         0.81453590 -0.68161830      -0.708673495  0.74951519
## correl1         0.87464632  0.12469211      -0.907338443  0.86887424
## correl1         0.61003569 -0.71939262      -0.795671391  0.06738001
## correl1        -0.27095316  0.56092700       0.057930614 -0.32276770
## correl1         0.82698265 -0.09999539      -0.437071301  0.88405695
## correl1        -0.84243520  0.25107348       0.896035777 -0.63767229
## correl1         0.45795096 -0.30732917      -0.238414781  0.59300440
## correl1        -0.17567243  0.65185914      -0.255530807  0.15071802
## correl1         0.71721815  0.20406011      -0.721245261  0.85736076
## correl1        -0.10391945 -0.49378624       0.001294581 -0.52300970
## correl1         0.78244109 -0.03022237      -0.728226320  0.67418992
## correl1         0.08757922  0.04611564      -0.101551564  0.23474819
## correl1         0.41754061  0.21069853      -0.528108714  0.70409856
## correl1        -0.72108145  0.09670058      -0.520238873 -0.69252580
##         taxa.casamentos ytrans
## correl1     -0.84022601      1
## correl1     -0.48191466      1
## correl1      0.73897122      1
## correl1      0.60058258      1
## correl1     -0.49252184      1
## correl1      0.63115020      1
## correl1     -0.22195378      1
## correl1      0.43101311      1
## correl1      0.51204494      1
## correl1     -0.44100784      1
## correl1     -0.02375323      1
## correl1     -0.76878265      1
## correl1     -0.81427731      1
## correl1      0.58296041      1
## correl1      0.22096259      1
## correl1     -0.09470244      1
## correl1      0.07117994      1
## correl1      0.60329632      1
## correl1     -0.76900103      1
## correl1      0.38162820      1
## correl1      0.52835808      1
## correl1      0.49681893      1
## correl1     -0.15624649      1
## correl1      0.60086516      1
## correl1     -0.11898673      1
## correl1      0.37853599      1
## correl1     -0.60229762      1
correl_todos = data.frame(correl_todos)
variaveis = c("tx.latr","Ano","perc.jov.1524","pbf","gini.ibge",
              "Taxa.desligamentos","perc.hom","densidade.urbana1",
              "tx.pres","taxa.casamentos","ytrans")
for (j in 2:length(variaveis)) {  # one plot of the 27 state correlations per variable
  titulo = variaveis[j]
  a = correl_todos[, j]
  plot(a, main = titulo)
}

GLM approach: gamma distribution for the response variable (see the sketch below)
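
As a sketch of this idea (illustrative only, not part of the original analysis; the log link, the rescaled Ano, and the random-intercept structure are assumptions), a gamma mixed model could be fitted to one imputed dataset with lme4::glmer:

# hypothetical sketch: gamma GLMM with log link on the first imputed dataset;
# Ano is rescaled to ease the convergence issues warned about earlier
modelo_gamma <- glmer(
  tx.latrc ~ scale(Ano) + perc.jov.1524 + pbf + gini.ibge +
    Taxa.desligamentos + perc.hom + densidade.urbana1 + (1 | Cod.UF),
  data = imp_comp2[[1]],
  family = Gamma(link = "log")
)
summary(modelo_gamma)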

Robust-models approach for high-dimensional longitudinal data

Nonparametric approach

Bayesian approach