library(readxl)
datos <- read_excel("C:/Users/Yohana Argueta/Desktop/datos_parcial_2.xlsx")
datos1<-datos[,-2]
head(datos1)
## # A tibble: 6 x 9
## ID X1 X2 X3 X4 X5 X6 X7 X8
## <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 1 9 2 20 20 0 0 2 56.4
## 2 2 10 6 62.5 50 37.5 3.95 11 147.
## 3 3 10 20 50 50 50 2.56 16 135
## 4 4 8 3 42.9 42.9 14.3 1.35 35 121.
## 5 5 7 7 75 75 75 9.09 8 202.
## 6 6 6 13 30 30 30 8.11 25 81
datos2<-datos1[,-1]
head(datos2)
## # A tibble: 6 x 8
## X1 X2 X3 X4 X5 X6 X7 X8
## <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 9 2 20 20 0 0 2 56.4
## 2 10 6 62.5 50 37.5 3.95 11 147.
## 3 10 20 50 50 50 2.56 16 135
## 4 8 3 42.9 42.9 14.3 1.35 35 121.
## 5 7 7 75 75 75 9.09 8 202.
## 6 6 13 30 30 30 8.11 25 81
library(Hmisc)
library(readr)
library(stargazer)
Mat_R<-rcorr(as.matrix(datos2))
descomposicion<-eigen(Mat_R$r)
stargazer(descomposicion$values,type = "text")
##
## ===============================================
## 3.897 1.964 0.841 0.500 0.454 0.276 0.066 0.002
## -----------------------------------------------
stargazer(descomposicion$vectors,type = "text")
##
## ======================================================
## 0.160 0.537 -0.059 0.734 0.358 -0.128 0.005 -0.003
## 0.063 0.593 0.077 -0.076 -0.788 -0.107 -0.001 0.009
## -0.473 0.074 0.283 0.083 0.023 0.105 -0.806 -0.151
## -0.474 0.106 0.314 0.024 0.057 0.035 0.523 -0.622
## -0.396 0.080 -0.490 -0.189 0.099 -0.741 -0.044 -0.009
## -0.347 0.087 -0.691 0.120 -0.123 0.601 0.056 0.004
## 0.127 0.565 0.001 -0.630 0.467 0.218 -0.044 -0.006
## -0.479 0.100 0.307 0.031 0.067 0.040 0.264 0.768
## ------------------------------------------------------
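Since the decomposition is taken from a correlation matrix, the eigenvalues sum to the number of variables and their cumulative share gives the proportion of variance retained by each component; a quick check (minimal sketch):
# eigenvalues of a correlation matrix sum to the number of variables
sum(descomposicion$values)                               # should equal ncol(datos2) = 8
# cumulative proportion of variance: the first 3 components reach about 0.84
round(cumsum(descomposicion$values) / ncol(datos2), 3)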
library(psych)
modelo<-principal(r = datos2,nfactors = 3,covar = FALSE,rotate = "none")
modelo
## Principal Components Analysis
## Call: principal(r = datos2, nfactors = 3, rotate = "none", covar = FALSE)
## Standardized loadings (pattern matrix) based upon correlation matrix
## PC1 PC2 PC3 h2 u2 com
## X1 -0.32 0.75 0.05 0.67 0.3316 1.4
## X2 -0.12 0.83 -0.07 0.71 0.2879 1.1
## X3 0.93 0.10 -0.26 0.95 0.0493 1.2
## X4 0.94 0.15 -0.29 0.98 0.0208 1.2
## X5 0.78 0.11 0.45 0.83 0.1742 1.6
## X6 0.68 0.12 0.63 0.89 0.1142 2.1
## X7 -0.25 0.79 0.00 0.69 0.3107 1.2
## X8 0.94 0.14 -0.28 0.99 0.0087 1.2
##
## PC1 PC2 PC3
## SS loadings 3.90 1.96 0.84
## Proportion Var 0.49 0.25 0.11
## Cumulative Var 0.49 0.73 0.84
## Proportion Explained 0.58 0.29 0.13
## Cumulative Proportion 0.58 0.87 1.00
##
## Mean item complexity = 1.4
## Test of the hypothesis that 3 components are sufficient.
##
## The root mean square of the residuals (RMSR) is 0.06
## with the empirical chi square 21.68 with prob < 0.0029
##
## Fit based upon off diagonal values = 0.98
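The unrotated loadings reported by principal() are, up to sign, the eigenvectors above scaled by the square root of the corresponding eigenvalues; a minimal sketch reproducing the PC1-PC3 columns from the eigen() output:
# loading_jk = eigenvector_jk * sqrt(eigenvalue_k); signs may be flipped relative
# to psych::principal, which only reflects the arbitrary orientation of each axis
cargas_pc <- sweep(descomposicion$vectors[, 1:3], 2,
                   sqrt(descomposicion$values[1:3]), "*")
round(cargas_pc, 2)   # compare with the PC1-PC3 columns in the modelo output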
library(psych)
modelo_5<-principal(r = datos2,nfactors = 3,covar = FALSE,rotate = "varimax")
modelo_5$loadings
##
## Loadings:
##         RC1     RC2     RC3
## X1    -0.162   0.801
## X2             0.840
## X3     0.930           0.278
## X4     0.954           0.260
## X5     0.428           0.800
## X6     0.250           0.907
## X7             0.826
## X8     0.957           0.270
##
## RC1 RC2 RC3
## SS loadings 2.974 2.046 1.682
## Proportion Var 0.372 0.256 0.210
## Cumulative Var 0.372 0.628 0.838
In the rotated solution, Factor 1 includes X3, X4, and X8.
In the rotated solution, Factor 2 includes X1, X2, and X7.
In the rotated solution, Factor 3 includes X5 and X6.
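The same grouping can be read off programmatically by assigning each variable to the rotated component with the largest absolute loading (a minimal sketch):
# assign each variable to the component where its |loading| is largest
cargas_rot <- unclass(modelo_5$loadings)
asignacion <- colnames(cargas_rot)[apply(abs(cargas_rot), 1, which.max)]
split(rownames(cargas_rot), asignacion)   # RC1: X3,X4,X8; RC2: X1,X2,X7; RC3: X5,X6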
# Min-max normalization: direct (larger values map toward 1, benefit-type criteria)
# and inverse (smaller values map toward 1, cost-type criteria)
norm_directa <- function(x){
  return((x - min(x)) / (max(x) - min(x)))
}
norm_inverza <- function(x){
  return((max(x) - x) / (max(x) - min(x)))
}
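A quick usage example: norm_directa maps the minimum to 0 and the maximum to 1, while norm_inverza reverses the scale.
x <- c(2, 5, 10)
norm_directa(x)   # 0.000 0.375 1.000
norm_inverza(x)   # 1.000 0.625 0.000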
library(dplyr)
datos2 %>% dplyr::select(X3, X4, X8) %>%
  dplyr::transmute(X3 = norm_inverza(X3), X4 = norm_inverza(X4), X8 = norm_inverza(X8)) -> data_factor_1
print(data_factor_1)
## # A tibble: 108 x 3
## X3 X4 X8
## <dbl> <dbl> <dbl>
## 1 0.96 0.8 0.842
## 2 0.45 0.5 0.483
## 3 0.6 0.5 0.532
## 4 0.686 0.571 0.587
## 5 0.3 0.25 0.266
## 6 0.84 0.7 0.745
## 7 0.327 0.273 0.286
## 8 0.45 0.5 0.554
## 9 0.6 0.563 0.567
## 10 0.320 0.467 0.448
## # ... with 98 more rows
library(dplyr)
datos2 %>% dplyr::select(X1, X2, X7) %>%
  dplyr::transmute(X1 = norm_inverza(X1), X2 = norm_inverza(X2), X7 = norm_inverza(X7)) -> data_factor_2
print(data_factor_2)
## # A tibble: 108 x 3
## X1 X2 X7
## <dbl> <dbl> <dbl>
## 1 0.806 1 1
## 2 0.774 0.983 0.901
## 3 0.774 0.923 0.846
## 4 0.839 0.996 0.637
## 5 0.871 0.979 0.934
## 6 0.903 0.953 0.747
## 7 0.742 0.970 0.923
## 8 0.806 0.996 0.956
## 9 0.774 0.991 0.967
## 10 0.710 0.983 0.956
## # ... with 98 more rows
library(dplyr)
datos2 %>% dplyr::select(X5, X6) %>%
  dplyr::transmute(X5 = norm_inverza(X5), X6 = norm_inverza(X6)) -> data_factor_3
print(data_factor_3)
## # A tibble: 108 x 2
## X5 X6
## <dbl> <dbl>
## 1 1 1
## 2 0.571 0.784
## 3 0.429 0.860
## 4 0.837 0.926
## 5 0.143 0.503
## 6 0.657 0.557
## 7 0.273 0.497
## 8 0.714 0.852
## 9 0.857 0.752
## 10 0.543 0.538
## # ... with 98 more rows
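The three normalization blocks above can also be written more compactly with dplyr::across (dplyr >= 1.0); a minimal equivalent sketch for the first factor:
# same result as data_factor_1, applying norm_inverza to each selected column
datos2 %>%
  dplyr::transmute(dplyr::across(c(X3, X4, X8), norm_inverza)) -> data_factor_1_alt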
data_factor_1 %>% dplyr::summarise(S = sd(X3), Y = sd(X4), Z = sd(X8)) -> sd_vector
print(sd_vector)
## # A tibble: 1 x 3
## S Y Z
## <dbl> <dbl> <dbl>
## 1 0.246 0.201 0.201
cor(data_factor_1)->mat_R_F1
print(mat_R_F1)
## X3 X4 X8
## X3 1.0000000 0.9387159 0.9590445
## X4 0.9387159 1.0000000 0.9958479
## X8 0.9590445 0.9958479 1.0000000
1-mat_R_F1->sum_data
colSums(sum_data)->sum_vector
sd_vector*sum_vector->vj
print(vj)
## S Y Z
## 1 0.02517949 0.01316004 0.009071691
vj/sum(vj)->wj
print(wj)
## S Y Z
## 1 0.5310871 0.2775722 0.1913406
print(round(wj*100,2))
## S Y Z
## 1 53.11 27.76 19.13
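The weight construction above (standard deviation of each normalized column times the sum of one minus its correlations, then normalized) is CRITIC-style weighting; wrapping it in a helper makes it reusable for the other factors (a minimal sketch):
# CRITIC-style weights: c_j = sd_j * sum_k(1 - r_jk), then normalize to sum to 1
pesos_critic <- function(df){
  s  <- apply(df, 2, sd)       # standard deviation of each column
  R  <- cor(df)                # correlation matrix
  cj <- s * colSums(1 - R)     # information carried by each criterion
  cj / sum(cj)                 # normalized weights
}
round(pesos_critic(data_factor_1), 4)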
datos2 %>% dplyr::select(X1,X2,X7)->data_norm
apply(data_norm,2,prop.table)->data_norm
head(data_norm,n=10)
## X1 X2 X7
## [1,] 0.007812500 0.0007073386 0.001398601
## [2,] 0.008680556 0.0021220159 0.007692308
## [3,] 0.008680556 0.0070733864 0.011188811
## [4,] 0.006944444 0.0010610080 0.024475524
## [5,] 0.006076389 0.0024756852 0.005594406
## [6,] 0.005208333 0.0045977011 0.017482517
## [7,] 0.009548611 0.0031830239 0.006293706
## [8,] 0.007812500 0.0010610080 0.004195804
## [9,] 0.008680556 0.0014146773 0.003496503
## [10,] 0.010416667 0.0021220159 0.004195804
# p * log(p) term used to build the Shannon entropy of each column
entropy <- function(x){
  return(x * log(x))
}
apply(data_norm,2,entropy)->data_norm_2
head(data_norm_2,n=10)
## X1 X2 X7
## [1,] -0.03790649 -0.005131035 -0.009192004
## [2,] -0.04120373 -0.013061833 -0.037442573
## [3,] -0.04120373 -0.035023278 -0.050269550
## [4,] -0.03451259 -0.007266351 -0.090806195
## [5,] -0.03100991 -0.014857176 -0.029012521
## [6,] -0.02738279 -0.024745742 -0.070743949
## [7,] -0.04441402 -0.018302144 -0.031897795
## [8,] -0.03790649 -0.007266351 -0.022966449
## [9,] -0.04120373 -0.009281491 -0.019776195
## [10,] -0.04754529 -0.013061833 -0.022966449
ncol(data_norm)->m
m
## [1] 3
-1/log(m)->K
print(K)
## [1] -0.9102392
K*colSums(data_norm_2)->Ej
print(Ej)
## X1 X2 X7
## 4.180549 3.202899 3.701923
1-Ej->vj
print(vj)
## X1 X2 X7
## -3.180549 -2.202899 -2.701923
prop.table(vj) -> wj  # equivalent to vj / sum(vj)
print(wj)
## X1 X2 X7
## 0.3933708 0.2724549 0.3341743
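For comparison, the usual entropy-weight formulation takes k = 1/ln(n) with n equal to the number of alternatives (rows), so that each E_j lies in [0, 1] and the diversification d_j = 1 - E_j is non-negative; a minimal sketch under that convention:
# conventional entropy weights (assumes strictly positive values; zeros would
# need the convention 0 * log(0) = 0)
pesos_entropia <- function(df){
  p  <- apply(df, 2, prop.table)              # p_ij: each column sums to 1
  Ej <- -colSums(p * log(p)) / log(nrow(p))   # entropy of each column, in [0, 1]
  dj <- 1 - Ej                                # degree of diversification
  dj / sum(dj)                                # normalized weights
}
round(pesos_entropia(dplyr::select(datos2, X1, X2, X7)), 4)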
library(readxl)
datos100 <- read_excel("C:/Users/Yohana Argueta/Desktop/subjetivo.xlsx")
datos100
## Subjective weights read from subjetivo.xlsx (Weight / Norm for each method):
##
##                Rank Sum         Rank Reciprocal              Rank Exponent
##   Var    Rank  Weight  Norm     Weight             Norm      Weight  Norm
##   X5       5     -2    0.4      0.2                0.54~        4    0.30~
##   X6       6     -3    0.6      0.1666666666666~   0.45~        9    0.69~
##   Total          -5    1        0.3666666666666~   0.99~       13    1
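The normalized "Rank Reciprocal" column can be reproduced from the ranks recorded in the spreadsheet (5 for X5, 6 for X6); a minimal sketch assuming those ranks:
rangos <- c(X5 = 5, X6 = 6)      # ranks taken from the spreadsheet
w_rec  <- 1 / rangos             # reciprocal weights: 0.2000 0.1667
round(w_rec / sum(w_rec), 3)     # normalized: 0.545 0.455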