# The U.S. Internal Revenue Service (IRS) is trying to estimate the monthly amount of unpaid taxes discovered by its audit department. In the past, the IRS estimated this amount based on the expected number of field-audit labor hours. In recent years, however, field-audit labor hours have become an erratic predictor of actual unpaid taxes. As a result, the agency is looking for another factor with which to improve the estimating equation.
# The audit department keeps a record of the number of hours its computers are used to detect unpaid taxes. Could we combine this information with the data on field-audit labor hours and obtain a more accurate estimating equation for the unpaid taxes discovered each month?
library(readxl)
# "blank" is deprecated in readxl column types; "skip" is the current spelling
MODELO_DE_INGRESO <- read_excel("C:/Users/74/Desktop/MODELO DE PRUEBA/MODELO DE INGRESO.xlsx", 
    col_types = c("skip", "numeric", "numeric", 
        "numeric", "numeric", "numeric"))
MODELO_DE_INGRESO

CHANGE OF VARIABLES

library(readxl)
# "blank" is deprecated in readxl column types; "skip" is the current spelling
MODELO_DE_INGRESO1 <- read_excel("C:/Users/74/Desktop/MODELO DE PRUEBA/MODELO DE INGRESO1.xlsx", 
    col_types = c("skip", "numeric", "numeric", 
        "numeric", "numeric", "numeric"))
head(MODELO_DE_INGRESO1, n=10)

Correlation Matrix (corrplot)

library(corrplot)
corrplot(corr = cor(x = MODELO_DE_INGRESO1, method = "pearson"), method = "number")

Correlation Matrix (pairs.panels)

library(psych)
pairs.panels(x = MODELO_DE_INGRESO1, ellipses = FALSE, lm = TRUE, method = "pearson")

Correlation Matrix (Pearson coefficients)

cor(x = MODELO_DE_INGRESO1, method = "pearson")
            y         X1        X2          X3         X4
y  1.00000000  0.8114956 0.9143126  0.08780545  0.8138792
X1 0.81149558  1.0000000 0.7197724 -0.13482984  0.6442168
X2 0.91431258  0.7197724 1.0000000  0.12146238  0.5418813
X3 0.08780545 -0.1348298 0.1214624  1.00000000 -0.2056464
X4 0.81387915  0.6442168 0.5418813 -0.20564645  1.0000000

REGRESSAND (y)

library(readr)
library(dplyr)
MODELO_DE_INGRESO1 %>% select("y") %>% as.matrix()->Yregresada
Yregresada
           y
 [1,] 450000
 [2,] 590000
 [3,] 600000
 [4,] 630000
 [5,] 670000
 [6,] 630000
 [7,] 610000
 [8,] 640000
 [9,] 670000
[10,] 890000
options(scipen = 999)

REGRESSORS

MODELO_DE_INGRESO1%>%  mutate(Cte=1) %>% select("Cte","X1","X2","X3", "X4") %>% as.matrix()->Xregresor
Xregresor
      Cte    X1     X2    X3     X4
 [1,]   1 20000 160000 20000 250000
 [2,]   1 20000 210000 50000 300000
 [3,]   1 20000 240000 20000 330000
 [4,]   1 30000 240000 30000 330000
 [5,]   1 20000 250000 30000 360000
 [6,]   1 40000 180000 30000 380000
 [7,]   1 20000 190000 20000 380000
 [8,]   1 30000 220000 20000 370000
 [9,]   1 50000 230000 10000 380000
[10,]   1 70000 390000 30000 410000
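
As a cross-check (an added sketch, not part of the original notebook), the same design matrix can be built with model.matrix(), which appends the intercept column automatically; only the name of the constant column differs ("(Intercept)" instead of "Cte").

# Hedged cross-check: model.matrix() should reproduce Xregresor up to column names
X_check <- model.matrix(~ X1 + X2 + X3 + X4, data = MODELO_DE_INGRESO1)
all.equal(unname(X_check), unname(Xregresor))   # expected TRUE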

SIGMA MATRIX (X'X)

Xtranspuesta<-t(Xregresor)
sigmamatriz<-(Xtranspuesta%*%Xregresor)
sigmamatriz
        Cte           X1           X2          X3            X4
Cte      10       320000      2310000      260000       3490000
X1   320000  12800000000  80800000000  8100000000  116300000000
X2  2310000  80800000000 569300000000 60800000000  820700000000
X3   260000   8100000000  60800000000  7800000000   89800000000
X4  3490000 116300000000 820700000000 89800000000 1238100000000
options(scipen = 9)
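
A minimal equivalent (added sketch): base R's crossprod() computes t(X) %*% X in a single call and should reproduce sigmamatriz exactly.

# crossprod(Xregresor) is equivalent to Xtranspuesta %*% Xregresor
sigmamatriz_check <- crossprod(Xregresor)
all.equal(sigmamatriz_check, sigmamatriz)   # expected TRUE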

CROSS-PRODUCT MATRIX (X'y)

cruzada<-(Xtranspuesta%*%Yregresada)
cruzada
                y
Cte       6380000
X1   217500000000
X2  1529900000000
X3   166800000000
X4  2264100000000
options(scipen = 99)

INVERSE OF THE SIGMA MATRIX

inversigma<-solve(sigmamatriz)
inversigma
                Cte                  X1                   X2                   X3
Cte  9.578240367495  0.0000373428270558 -0.00000220580296064 -0.00003921269280963
X1   0.000037342827  0.0000000010476900 -0.00000000016033188  0.00000000023243842
X2  -0.000002205803 -0.0000000001603319  0.00000000006766047 -0.00000000009700525
X3  -0.000039212693  0.0000000002324384 -0.00000000009700525  0.00000000114308493
X4  -0.000026200975 -0.0000000001142569 -0.00000000001653585  0.00000000007009360
                      X4
Cte -0.00002620097517571
X1  -0.00000000011425685
X2  -0.00000000001653585
X3   0.00000000007009360
X4   0.00000000009127376
options(scipen = 9)

ESTIMATED BETAS

Betas_Estimados<-(inversigma%*%cruzada)
Betas_Estimados
                y
Cte -5724.5761971
X1      0.9098562
X2      0.9492702
X3      1.3355403
X4      1.0332484
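
The same estimates can be obtained without forming the explicit inverse, which is numerically safer; a small added sketch, assuming sigmamatriz and cruzada are still in the workspace:

# Solve the normal equations (X'X) b = X'y directly instead of inverting X'X
Betas_solve <- solve(sigmamatriz, cruzada)
Betas_solve   # should match Betas_Estimados and, later, coef(modelo_lineal)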

Confidence Intervals (computed with modelo_lineal, which is fitted below in the LM MODEL section)

confint(object = modelo_lineal,level = .95)
                     2.5 %       97.5 %
(Intercept) -55359.5353465 43910.382952
X1               0.3907440     1.428968
X2               0.8173497     1.081191
X3               0.7933097     1.877771
X4               0.8800276     1.186469
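
For reference, these intervals can be reproduced by hand from the coefficients, their standard errors (square roots of the diagonal of vcov) and the t quantile at the residual degrees of freedom; an added sketch, assuming modelo_lineal (fitted in the LM MODEL section) is available in the session:

# Manual 95% confidence intervals: beta_hat +/- t_{0.975, df} * se(beta_hat)
b_hat <- coef(modelo_lineal)
se_b  <- sqrt(diag(vcov(modelo_lineal)))
t_q   <- qt(0.975, df = df.residual(modelo_lineal))
cbind(`2.5 %` = b_hat - t_q * se_b, `97.5 %` = b_hat + t_q * se_b)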

P MATRIX (PROJECTION / HAT MATRIX)

Matriz_P<-(Xregresor%*%inversigma%*%Xtranspuesta)
head(Matriz_P, n = 5)
           [,1]        [,2]        [,3]      [,4]       [,5]        [,6]        [,7]
[1,]  0.7846221  0.11501380  0.17883272 0.1691169 -0.1321183 -0.04952925 -0.13370254
[2,]  0.1150138  0.70810335 -0.05191336 0.2002776  0.1382571  0.22728938 -0.05476714
[3,]  0.1788327 -0.05191336  0.37856361 0.1276475  0.2663539 -0.25711624  0.16048147
[4,]  0.1691169  0.20027762  0.12764747 0.1422965  0.1140074  0.01786630  0.01615227
[5,] -0.1321183  0.13825710  0.26635394 0.1140074  0.3700988 -0.08676176  0.25143747
             [,8]        [,9]       [,10]
[1,]  0.002868704  0.19870331 -0.13380740
[2,] -0.054773557 -0.26867652  0.04118935
[3,]  0.140290509  0.01900248  0.03785741
[4,]  0.051189341  0.01822463  0.14322153
[5,]  0.147756510 -0.12845770  0.05942653
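
The P matrix is the usual projection ("hat") matrix: it is symmetric, idempotent, and its diagonal contains the leverages. A quick added check, assuming the objects above are still in memory:

# Properties of the projection matrix
all.equal(Matriz_P, t(Matriz_P))             # symmetry
all.equal(Matriz_P %*% Matriz_P, Matriz_P)   # idempotence (up to rounding)
diag(Matriz_P)                               # leverages; compare with hatvalues(modelo_lineal)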

PROJECTION OF Y ONTO X

Y_Proyectada<-(Matriz_P%*%Yregresada)
Y_Proyectada
             y
 [1,] 449378.7
 [2,] 588570.8
 [3,] 607980.2
 [4,] 630434.2
 [5,] 661825.8
 [6,] 634238.9
 [7,] 612179.1
 [8,] 639423.3
 [9,] 664090.2
[10,] 891878.8

RESIDUALS

Errores<-(Yregresada-Y_Proyectada)
Errores
               y
 [1,]   621.2985
 [2,]  1429.1554
 [3,] -7980.1956
 [4,]  -434.1606
 [5,]  8174.2458
 [6,] -4238.9305
 [7,] -2179.1058
 [8,]   576.7096
 [9,]  5909.8020
[10,] -1878.8188
plot(Errores)

hist(Errores)
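
Because the model contains an intercept, the OLS residuals are orthogonal to every column of X and therefore sum to zero; a short added verification:

# X'e should be numerically zero: residuals are orthogonal to the regressors
t(Xregresor) %*% Errores   # each entry ~ 0 up to floating-point error
sum(Errores)               # ~ 0 thanks to the constant term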

LM MODEL

library(readr)
library(stargazer)
modelo_lineal<-lm(y~X1+X2+X3+X4,data = MODELO_DE_INGRESO1)
stargazer(modelo_lineal,title = "modelo estimado",type = "text",digits = 8)

modelo estimado
===============================================
                        Dependent variable:    
                    ---------------------------
                                 y             
-----------------------------------------------
X1                         0.90985620***       
                           (0.20194340)        
                                               
X2                         0.94927020***       
                           (0.05131932)        
                                               
X3                         1.33554000***       
                           (0.21093690)        
                                               
X4                         1.03324800***       
                           (0.05960550)        
                                               
Constant                  -5,724.57600000      
                         (19,308.84000000)     
                                               
-----------------------------------------------
Observations                    10             
R2                          0.99815630         
Adjusted R2                 0.99668130         
Residual Std. Error   6,238.97700000 (df = 5)  
F Statistic         676.72340000*** (df = 4; 5)
===============================================
Note:               *p<0.1; **p<0.05; ***p<0.01
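
The residual standard error and R2 reported in the table can be recovered from the hand-made residual vector; an added sketch (reference values taken from the table above):

# Residual standard error and R-squared recomputed from Errores
n_obs <- nrow(Xregresor); k_par <- ncol(Xregresor)
rss <- sum(Errores^2)                            # residual sum of squares
tss <- sum((Yregresada - mean(Yregresada))^2)    # total sum of squares
sqrt(rss / (n_obs - k_par))                      # ~ 6238.977
1 - rss / tss                                    # ~ 0.9981563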

Confidence Intervals

confint(object = modelo_lineal,level = .96)
                       2 %         98 %
(Intercept) -58949.5644014 47500.412007
X1               0.3531974     1.466515
X2               0.8078081     1.090732
X3               0.7540909     1.916990
X4               0.8689454     1.197552
options(scipen=999)
modelo_lineal$coefficients
  (Intercept)            X1            X2            X3            X4 
-5724.5761971     0.9098562     0.9492702     1.3355403     1.0332484 
summary(modelo_lineal)

Call:
lm(formula = y ~ X1 + X2 + X3 + X4, data = MODELO_DE_INGRESO1)

Residuals:
      1       2       3       4       5       6       7       8       9      10 
  621.3  1429.2 -7980.2  -434.2  8174.2 -4238.9 -2179.1   576.7  5909.8 -1878.8 

Coefficients:
               Estimate  Std. Error t value  Pr(>|t|)    
(Intercept) -5724.57620 19308.84225  -0.296   0.77879    
X1              0.90986     0.20194   4.505   0.00637 ** 
X2              0.94927     0.05132  18.497 0.0000085 ***
X3              1.33554     0.21094   6.331   0.00145 ** 
X4              1.03325     0.05961  17.335 0.0000117 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 6239 on 5 degrees of freedom
Multiple R-squared:  0.9982,    Adjusted R-squared:  0.9967 
F-statistic: 676.7 on 4 and 5 DF,  p-value: 0.0000005102
summary(MODELO_DE_INGRESO1)
       y                X1              X2               X3              X4        
 Min.   :450000   Min.   :20000   Min.   :160000   Min.   :10000   Min.   :250000  
 1st Qu.:602500   1st Qu.:20000   1st Qu.:195000   1st Qu.:20000   1st Qu.:330000  
 Median :630000   Median :25000   Median :225000   Median :25000   Median :365000  
 Mean   :638000   Mean   :32000   Mean   :231000   Mean   :26000   Mean   :349000  
 3rd Qu.:662500   3rd Qu.:37500   3rd Qu.:240000   3rd Qu.:30000   3rd Qu.:380000  
 Max.   :890000   Max.   :70000   Max.   :390000   Max.   :50000   Max.   :410000  

OBJECTS INSIDE THE MODEL

VECTOR OF ESTIMATED COEFFICIENTS

options(scipen=999)
modelo_lineal$coefficients
  (Intercept)            X1            X2            X3            X4 
-5724.5761971     0.9098562     0.9492702     1.3355403     1.0332484 

VARIANCE-COVARIANCE MATRIX OF THE PARAMETERS

Var_Covar<-vcov(modelo_lineal)
print(Var_Covar)
                (Intercept)             X1            X2              X3              X4
(Intercept) 372831388.95091 1453.563237546 -85.860507778 -1526.347446273 -1019.868534494
X1               1453.56324    0.040781157  -0.006240891     0.009047626    -0.004447429
X2                -85.86051   -0.006240891   0.002633673    -0.003775913    -0.000643655
X3              -1526.34745    0.009047626  -0.003775913     0.044494388     0.002728382
X4              -1019.86853   -0.004447429  -0.000643655     0.002728382     0.003552816
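
This matrix can also be reconstructed from the hand-computed pieces: Var(beta_hat) = s^2 (X'X)^-1, with s^2 the residual variance. An added sketch, using a loose tolerance because the two inverses come from different numerical routes:

# s^2 * (X'X)^-1 should reproduce vcov(modelo_lineal)
s2 <- sum(Errores^2) / (nrow(Xregresor) - ncol(Xregresor))
Var_Covar_manual <- s2 * inversigma
all.equal(unname(Var_Covar_manual), unname(Var_Covar), tolerance = 1e-6)   # expected TRUE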

Model Robustness Analysis

FIT OF THE RESIDUALS TO THE NORMAL DISTRIBUTION

#Fit of the residuals to the normal distribution
library(fitdistrplus)
library(stargazer)
fit_normal<-fitdist(data = modelo_lineal$residuals,distr = "norm")
fit_normal
Fitting of the distribution ' norm ' by maximum likelihood 
Parameters:
plot(fit_normal)

JARQUE-BERA TEST

#Chi-squared distribution
library(normtest)  
jb.norm.test(modelo_lineal$residuals)

    Jarque-Bera test for normality

data:  modelo_lineal$residuals
JB = 0.10016, p-value = 0.959
#Jarque-Bera normality test: p-value > 0.05, so the hypothesis of normally distributed residuals holds.
#For the errors to be considered normal, the JB statistic must fall below the critical value. The JB statistic is asymptotically chi-squared with 2 degrees of freedom, whose 5% critical value is about 5.99; since JB = 0.10016 is well below it, the null hypothesis of normally distributed residuals cannot be rejected, so this basic assumption is satisfied.
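
For reference, the Jarque-Bera statistic can be recomputed from the sample skewness and kurtosis of the residuals; a small added sketch (normtest::jb.norm.test reports a simulated p-value, so only the statistic is compared):

# JB = n * (S^2/6 + (K - 3)^2/24), with S the skewness and K the kurtosis
e_res <- modelo_lineal$residuals
n_res <- length(e_res)
m2 <- mean((e_res - mean(e_res))^2)
S  <- mean((e_res - mean(e_res))^3) / m2^(3/2)   # skewness
K  <- mean((e_res - mean(e_res))^4) / m2^2       # kurtosis
n_res * (S^2 / 6 + (K - 3)^2 / 24)               # should be close to JB = 0.10016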

Kolmogorov-Smirnov Normality Test

#Lilliefors table
#With a significance level of 0.05 and a sample of n = 10, the critical value is 0.2616.
#The null hypothesis is not rejected, so the residuals are consistent with a normal distribution, because D = 0.1793 < critical value = 0.2616.
#Kolmogorov-Smirnov (Lilliefors) normality test: p-value > 0.05, so the null hypothesis is not rejected, i.e. there is evidence that the errors are normally distributed.
library(nortest)
lillie.test(modelo_lineal$residuals)

    Lilliefors (Kolmogorov-Smirnov) normality test

data:  modelo_lineal$residuals
D = 0.1793, p-value = 0.4809
hist(modelo_lineal$residuals, main = "Histogram of the residuals", xlab = "Residuals", ylab = "Frequency")

qqnorm(modelo_lineal$residuals)
qqline(modelo_lineal$residuals)

Shapiro-Wilk Normality Test

#Normal distribution with estimated parameters
#shapiro.test() is provided by base R's stats package
library(normtest)
shapiro.test(modelo_lineal$residuals)

    Shapiro-Wilk normality test

data:  modelo_lineal$residuals
W = 0.96826, p-value = 0.8743
#Shapiro-Wilk normality test: p-value > 0.05, so the null hypothesis is not rejected; there is evidence that the errors are normally distributed.
#Compute the X'X matrix
library(stargazer)
X_mat<-model.matrix(modelo_lineal)
stargazer(head(X_mat,n=6),type="text")

===========================================
  (Intercept)   X1     X2      X3     X4   
-------------------------------------------
1      1      20,000 160,000 20,000 250,000
2      1      20,000 210,000 50,000 300,000
3      1      20,000 240,000 20,000 330,000
4      1      30,000 240,000 30,000 330,000
5      1      20,000 250,000 30,000 360,000
6      1      40,000 180,000 30,000 380,000
-------------------------------------------
XX_matrix<-t(X_mat)%*%X_mat
stargazer(XX_matrix,type = "text")

========================================================================================
            (Intercept)       X1              X2              X3              X4        
----------------------------------------------------------------------------------------
(Intercept)     10          320,000        2,310,000       260,000         3,490,000    
X1            320,000   12,800,000,000  80,800,000,000  8,100,000,000   116,300,000,000 
X2           2,310,000  80,800,000,000  569,300,000,000 60,800,000,000  820,700,000,000 
X3            260,000    8,100,000,000  60,800,000,000  7,800,000,000   89,800,000,000  
X4           3,490,000  116,300,000,000 820,700,000,000 89,800,000,000 1,238,100,000,000
----------------------------------------------------------------------------------------
head(XX_matrix, n=5)
            (Intercept)           X1           X2          X3            X4
(Intercept)          10       320000      2310000      260000       3490000
X1               320000  12800000000  80800000000  8100000000  116300000000
X2              2310000  80800000000 569300000000 60800000000  820700000000
X3               260000   8100000000  60800000000  7800000000   89800000000
X4              3490000 116300000000 820700000000 89800000000 1238100000000
#Condition index
#Normalization of X'X
library(stargazer)
options(scipen = 999)
Sn<-solve(diag(sqrt(diag(XX_matrix))))
stargazer(Sn,type = "text")

=====================================
0.316    0       0       0       0   
0     0.00001    0       0       0   
0        0    0.00000    0       0   
0        0       0    0.00001    0   
0        0       0       0    0.00000
-------------------------------------
#Normalized X'X
library(stargazer)
XX_norm<-(Sn%*%XX_matrix)%*%Sn
stargazer(XX_norm,type = "text",digits = 4)

==================================
1      0.8944 0.9681 0.9309 0.9919
0.8944   1    0.9465 0.8106 0.9238
0.9681 0.9465   1    0.9124 0.9775
0.9309 0.8106 0.9124   1    0.9138
0.9919 0.9238 0.9775 0.9138   1   
----------------------------------
#Eigenvalues of the normalized X'X:
library(stargazer)
#eigenvalues
lambdas<-eigen(XX_norm,symmetric = TRUE)
stargazer(lambdas$values,type = "text")

=============================
4.711 0.197 0.067 0.020 0.005
-----------------------------
K<-sqrt(max(lambdas$values)/min(lambdas$values))
print(K)
[1] 30.69733
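
Equivalently (an added sketch), the condition index is the 2-norm condition number of the column-normalized design matrix, so base R's kappa() should give the same value:

# Condition number of X with columns rescaled to unit length
X_scaled <- X_mat %*% Sn
kappa(X_scaled, exact = TRUE)   # should match K (about 30.7)
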
#Farrar-Glauber test
#Computation of |R|
library(stargazer)
Zn<-scale(X_mat[,-1])
stargazer(head(Zn,n=6),type = "text")

=============================
    X1     X2     X3     X4  
-----------------------------
1 -0.712 -1.127 -0.558 -2.095
2 -0.712 -0.333 2.233  -1.037
3 -0.712 0.143  -0.558 -0.402
4 -0.119 0.143  0.372  -0.402
5 -0.712 0.302  0.372  0.233 
6 0.474  -0.810 0.372  0.656 
-----------------------------
#Compute the correlation matrix R
library(stargazer)
n<-nrow(Zn)
R<-(t(Zn)%*%Zn)*(1/(n-1))
#R can also be obtained with cor(X_mat[,-1])
stargazer(R,type = "text",digits = 4)

=================================
     X1      X2     X3      X4   
---------------------------------
X1    1    0.7198 -0.1348 0.6442 
X2 0.7198    1    0.1215  0.5419 
X3 -0.1348 0.1215    1    -0.2056
X4 0.6442  0.5419 -0.2056    1   
---------------------------------
#Determinant of the matrix R
determinante_R<-det(R)
print(determinante_R)
[1] 0.2320024
#Applying the Farrar-Glauber (Bartlett) test
#Chi-squared FG statistic
m<-ncol(X_mat[,-1])
n<-nrow(X_mat[,-1])
chi_FG<--(n-1-(2*m+5)/6)*log(determinante_R)
print(chi_FG)
[1] 9.983551
# Critical value
gl<-m*(m-1)/2
VC<-qchisq(p = 0.95,df = gl)
print(VC)
[1] 12.59159
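
To close the test, the statistic is compared with the critical value (or, equivalently, a p-value is reported); an added sketch using the objects just computed:

# H0: the regressors are orthogonal (no multicollinearity); reject only if chi_FG > VC
chi_FG > VC                                   # FALSE: 9.98 < 12.59, H0 is not rejected at 5%
pchisq(chi_FG, df = gl, lower.tail = FALSE)   # p-value of the chi-squared statistic
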
#VIFs for the estimated model:
library(car)
VIFs_car<-vif(modelo_lineal)
print(VIFs_car)
      X1       X2       X3       X4 
2.682086 2.414802 1.188808 1.833690 
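
The same VIFs can be read from the inverse of the regressors' correlation matrix computed earlier, since VIF_j = 1 / (1 - R_j^2) equals the j-th diagonal element of R^-1; an added cross-check:

# Diagonal of R^-1 reproduces the VIFs
diag(solve(R))   # should match vif(modelo_lineal): 2.68, 2.41, 1.19, 1.83
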
library(mctest)
mc.plot(x = X_mat[,-1], y = MODELO_DE_INGRESO1$y, vif = 3)
