# INPUT DATA
library(ggplot2)
library(lmtest)
## Warning: package 'lmtest' was built under R version 4.5.2
## Loading required package: zoo
## Warning: package 'zoo' was built under R version 4.5.2
##
## Attaching package: 'zoo'
## The following objects are masked from 'package:base':
##
## as.Date, as.Date.numeric
library(outliers)
## Warning: package 'outliers' was built under R version 4.5.2
library(car)
## Loading required package: carData
## Warning: package 'carData' was built under R version 4.5.2
# Input data
df <- read.csv("C:/Users/User/Downloads/Data_Handson_anreg_fixed1.csv",
               sep = ";",
               header = TRUE)
data <- df[, c("Work_Experience", "Income")]
data$Work_Experience <- as.numeric(data$Work_Experience)
data$Income <- as.numeric(data$Income)
data <- na.omit(data)
# MISSING VALUES
# Check for missing values
cat("\n=== Missing Values ===\n")
##
## === Missing Values ===
total_missing <- sum(is.na(data))
total_missing
## [1] 0
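Because na.omit() was already applied when building `data`, this total is 0 by construction. A per-column check on the raw import, before any rows are dropped, is often more informative; a minimal sketch, assuming `df` still holds the original data frame:

# NA count per column in the raw import, and rows dropped when building `data`
colSums(is.na(df[, c("Work_Experience", "Income")]))
nrow(df) - nrow(data)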
# OUTLIERS
# Outlier detection
detect_outliers <- function(data) {
  cat("\n=== Outlier Detection ===\n\n")
  outlier_report <- list()
  for (var in names(data)[sapply(data, is.numeric)]) {
    cat("Outlier analysis for variable:", var, "\n")
    # Descriptive statistics
    stats <- summary(data[[var]])
    iqr_val <- IQR(data[[var]], na.rm = TRUE)
    q1 <- quantile(data[[var]], 0.25, na.rm = TRUE)
    q3 <- quantile(data[[var]], 0.75, na.rm = TRUE)
    lower_bound <- q1 - 1.5 * iqr_val
    upper_bound <- q3 + 1.5 * iqr_val
    # Outlier detection: IQR method
    outliers_iqr <- data[[var]][data[[var]] < lower_bound | data[[var]] > upper_bound]
    # Outlier detection: Z-score method
    z_scores <- scale(data[[var]])
    outliers_z <- data[[var]][abs(z_scores) > 3]
    # Outlier detection: Grubbs' test (formal statistical test)
    if (length(na.omit(data[[var]])) > 6) {
      # Assign via tryCatch so the result is kept even if grubbs.test() errors
      grubbs_outlier <- tryCatch({
        grubbs_test <- grubbs.test(na.omit(data[[var]]))
        ifelse(grubbs_test$p.value < 0.05, "Detected", "Not detected")
      }, error = function(e) {
        "Could not be computed"
      })
    } else {
      grubbs_outlier <- "Not enough data"
    }
    # Summary
    outlier_report[[var]] <- list(
      n_outliers_iqr = length(outliers_iqr),
      n_outliers_z = length(outliers_z),
      grubbs_result = grubbs_outlier,
      lower_bound = lower_bound,
      upper_bound = upper_bound,
      outlier_values = unique(round(outliers_iqr, 2))
    )
    cat(" - Outliers (IQR method):", length(outliers_iqr), "\n")
    cat(" - Outliers (Z-score > 3):", length(outliers_z), "\n")
    cat(" - Grubbs' test:", grubbs_outlier, "\n")
    cat(" - Lower bound:", round(lower_bound, 2), "\n")
    cat(" - Upper bound:", round(upper_bound, 2), "\n")
    if (length(outliers_iqr) > 0) {
      cat(" - Outlier values:", paste(head(unique(round(outliers_iqr, 2)), 5), collapse = ", "), "\n")
    }
    cat("\n")
  }
  return(outlier_report)
}
# Run outlier detection
outlier_analysis <- detect_outliers(data)
##
## === Outlier Detection ===
##
## Outlier analysis for variable: Work_Experience
## - Outliers (IQR method): 0
## - Outliers (Z-score > 3): 0
## - Grubbs' test: Not detected
## - Lower bound: -25.5
## - Upper bound: 74.5
##
## Outlier analysis for variable: Income
## - Outliers (IQR method): 957
## - Outliers (Z-score > 3): 188
## - Grubbs' test: Detected
## - Lower bound: -350190.6
## - Upper bound: 765668.4
## - Outlier values: 3584362, 5188124, 9892000, 1011213, 3856805
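The IQR rule flags 957 Income values and Grubbs' test also signals an extreme value, so it is worth checking how sensitive the regression below is to these points. One common option is capping (winsorizing) Income at the IQR bounds; a minimal sketch, not part of the original analysis, with the column name `Income_capped` purely illustrative:

# Illustrative only: cap Income at the IQR-based bounds (winsorizing)
q1  <- quantile(data$Income, 0.25, na.rm = TRUE)
q3  <- quantile(data$Income, 0.75, na.rm = TRUE)
iqr <- IQR(data$Income, na.rm = TRUE)
data$Income_capped <- pmin(pmax(data$Income, q1 - 1.5 * iqr), q3 + 1.5 * iqr)

Whether capping, removing, or transforming these values is appropriate depends on whether the very large incomes are genuine observations or data errors.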
# Visualization
visualize_outliers <- function(data) {
  numeric_vars <- names(data)[sapply(data, is.numeric)]
  # Boxplot for each numeric variable
  par(mfrow = c(2, 3))
  for (var in numeric_vars[1:min(6, length(numeric_vars))]) {
    boxplot(data[[var]], main = var, col = "lightblue",
            ylab = "Value", outline = TRUE)
    grid()
  }
  par(mfrow = c(1, 1))
  # Histogram with IQR outlier bounds overlaid
  for (var in numeric_vars[1:min(3, length(numeric_vars))]) {
    # Compute outlier bounds
    q1 <- quantile(data[[var]], 0.25, na.rm = TRUE)
    q3 <- quantile(data[[var]], 0.75, na.rm = TRUE)
    iqr_val <- IQR(data[[var]], na.rm = TRUE)
    lower_bound <- q1 - 1.5 * iqr_val
    upper_bound <- q3 + 1.5 * iqr_val
    # Flag outliers
    is_outlier <- data[[var]] < lower_bound | data[[var]] > upper_bound
    # Histogram with density curve and bound markers
    hist_data <- ggplot(data.frame(value = data[[var]]), aes(x = value)) +
      geom_histogram(aes(y = after_stat(density)), bins = 30, fill = "lightblue", alpha = 0.7) +
      geom_density(color = "darkblue", linewidth = 1) +
      geom_vline(xintercept = c(lower_bound, upper_bound),
                 color = "red", linetype = "dashed", linewidth = 1) +
      labs(title = paste("Distribution and Outliers:", var),
           x = var, y = "Density") +
      theme_minimal() +
      annotate("text", x = lower_bound, y = 0,
               label = "Lower", vjust = 2, color = "red") +
      annotate("text", x = upper_bound, y = 0,
               label = "Upper", vjust = 2, color = "red")
    print(hist_data)
  }
  # Scatter plot matrix to inspect multivariate outliers
  if (length(numeric_vars) >= 3) {
    pairs(data[, numeric_vars[1:min(4, length(numeric_vars))]],
          main = "Scatter Plot Matrix for Outlier Detection",
          pch = 19, col = alpha("blue", 0.6))
  }
}
# Run the visualization
visualize_outliers(data)
# DESCRIPTIVE STATISTICS
# Descriptive analysis and visualization
print("Descriptive Statistics:")
## [1] "Descriptive Statistics:"
summary(data)
## Work_Experience Income
## Min. : 0.00 Min. : 31127
## 1st Qu.:12.00 1st Qu.: 68257
## Median :25.00 Median : 72875
## Mean :24.76 Mean : 814278
## 3rd Qu.:37.00 3rd Qu.: 347221
## Max. :50.00 Max. :9992571
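The mean Income (814,278) is more than ten times the median (72,875), so the distribution is strongly right-skewed. A quick way to see the bulk of the distribution is to summarize it on the log scale; a minimal sketch:

# Income is strongly right-skewed (mean >> median); summarize on the log10 scale
summary(log10(data$Income))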
# Scatter plot
ggplot(data, aes(x = Work_Experience, y = Income)) +
  geom_point(color = "blue", size = 3) +
  labs(title = "Relationship between Work Experience and Income",
       x = "Work_Experience", y = "Income") +
  theme_minimal()
# Correlation
cor_test <- cor.test(data$Work_Experience, data$Income)
print(paste("Pearson correlation:", round(cor_test$estimate, 4)))
## [1] "Pearson correlation: -0.0265"
print(paste("Correlation p-value:", round(cor_test$p.value, 4)))
## [1] "Correlation p-value: 0.0611"
# REGRESSION ANALYSIS
# Fit the regression model
model <- lm(Income ~ Work_Experience, data = data)
print("Regression Model Summary:")
## [1] "Regression Model Summary:"
summary(model)
##
## Call:
## lm(formula = Income ~ Work_Experience, data = data)
##
## Residuals:
## Min 1Q Median 3Q Max
## -859345 -762182 -704427 -476249 9241544
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 895662 50535 17.724 <2e-16 ***
## Work_Experience -3287 1755 -1.873 0.0611 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 1824000 on 4996 degrees of freedom
## Multiple R-squared: 0.0007016, Adjusted R-squared: 0.0005016
## F-statistic: 3.508 on 1 and 4996 DF, p-value: 0.06115
# LINEAR REGRESSION ASSUMPTION CHECKS
cat("\n=== LINEAR REGRESSION ASSUMPTION CHECKS ===\n")
##
## === LINEAR REGRESSION ASSUMPTION CHECKS ===
# Residual normality
shapiro_test <- shapiro.test(residuals(model))
cat("1. NORMALITY TEST (Shapiro-Wilk):\n")
## 1. NORMALITY TEST (Shapiro-Wilk):
cat(" W statistic =", round(shapiro_test$statistic, 4), "\n")
## W statistic = 0.4914
cat(" p-value =", round(shapiro_test$p.value, 4), "\n")
## p-value = 0
if (shapiro_test$p.value > 0.05) {
  cat(" Decision: residuals are normally distributed\n")
} else {
  cat(" Decision: residuals are not normally distributed\n")
}
## Decision: residuals are not normally distributed
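With W = 0.49 the residuals are far from normal, which is consistent with the heavy right skew in Income. A common remedy is to refit the model with a log-transformed response; a minimal sketch, not part of the original analysis (all Income values here are positive, so log() is defined):

# Refit with a log-transformed response and re-check residual normality
model_log <- lm(log(Income) ~ Work_Experience, data = data)
shapiro.test(residuals(model_log))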
# Q-Q Plot
qqnorm(residuals(model), main = "Q-Q Plot Residual")
qqline(residuals(model), col = "red")
# Homoscedasticity
bp_test <- bptest(model)
cat("\n2. HOMOSCEDASTICITY TEST (Breusch-Pagan):\n")
##
## 2. HOMOSCEDASTICITY TEST (Breusch-Pagan):
cat(" LM statistic =", round(bp_test$statistic, 4), "\n")
## LM statistic = 1.7834
cat(" p-value =", round(bp_test$p.value, 4), "\n")
## p-value = 0.1817
if (bp_test$p.value > 0.05) {
  cat(" Decision: residual variance is homogeneous\n")
} else {
  cat(" Decision: heteroscedasticity is present\n")
}
## Decision: residual variance is homogeneous
# Plot Residual vs Fitted
plot(fitted(model), residuals(model),
     main = "Residual vs Fitted Values",
     xlab = "Fitted Values", ylab = "Residuals",
     pch = 19, col = "blue")
abline(h = 0, col = "red", lty = 2)
# Independence (no autocorrelation)
dw_test <- dwtest(model)
cat("\n3. AUTOCORRELATION TEST (Durbin-Watson):\n")
##
## 3. AUTOCORRELATION TEST (Durbin-Watson):
cat(" DW statistic =", round(dw_test$statistic, 4), "\n")
## DW statistic = 2.0377
cat(" p-value =", round(dw_test$p.value, 4), "\n")
## p-value = 0.9087
if (dw_test$p.value > 0.05) {
  cat(" Decision: no autocorrelation\n")
} else {
  cat(" Decision: autocorrelation is present\n")
}
## Decision: no autocorrelation
# COEFFICIENT INTERPRETATION
cat("\n=== COEFFICIENT INTERPRETATION ===\n")
##
## === COEFFICIENT INTERPRETATION ===
intercept <- coef(model)[1]
slope <- coef(model)[2]
cat("Regression equation: Income =", round(intercept, 2), "+", round(slope, 2), "* Work_Experience\n")
## Regression equation: Income = 895662 + -3286.83 * Work_Experience
cat("\nInterpretation:\n")
##
## Interpretation:
cat("1. Intercept (β0 =", round(intercept, 2), "):\n")
## 1. Intercept (β0 = 895662 ):
cat(" Expected Income when Work_Experience = 0 is", round(intercept, 2), "\n")
## Expected Income when Work_Experience = 0 is 895662
cat("2. Slope (β1 =", round(slope, 2), "):\n")
## 2. Slope (β1 = -3286.83 ):
cat(" For each additional unit of Work_Experience, Income changes by", round(slope, 2), "\n")
## For each additional unit of Work_Experience, Income changes by -3286.83
# PARAMETER ESTIMATION AND INFERENCE
cat("\n=== PARAMETER ESTIMATION ===\n")
##
## === PARAMETER ESTIMATION ===
conf_int <- confint(model, level = 0.95)
cat("95% Confidence Intervals:\n")
## 95% Confidence Intervals:
cat(" Intercept: [", round(conf_int[1,1], 3), ", ", round(conf_int[1,2], 3), "]\n", sep = "")
## Intercept: [796591.7, 994732.3]
cat(" Slope: [", round(conf_int[2,1], 3), ", ", round(conf_int[2,2], 3), "]\n", sep = "")
## Slope: [-6727.37, 153.709]
# Hypothesis test for the slope
cat("\nHypothesis test for the slope (β1):\n")
##
## Hypothesis test for the slope (β1):
cat(" H0: β1 = 0 (no linear relationship)\n")
## H0: β1 = 0 (no linear relationship)
cat(" H1: β1 ≠ 0 (there is a linear relationship)\n")
## H1: β1 ≠ 0 (there is a linear relationship)
summary_model <- summary(model)
slope_pvalue <- summary_model$coefficients[2, 4]
cat(" p-value =", round(slope_pvalue, 6), "\n")
## p-value = 0.061147
if (slope_pvalue < 0.05) {
  cat(" Decision: reject H0; there is a significant linear relationship\n")
} else {
  cat(" Decision: fail to reject H0; no significant linear relationship\n")
}
## Decision: fail to reject H0; no significant linear relationship
# COEFFICIENT OF DETERMINATION
r_squared <- summary_model$r.squared
cat("\nCoefficient of Determination (R²):\n")
##
## Coefficient of Determination (R²):
cat(" R² =", round(r_squared, 4), "\n")
## R² = 7e-04
cat(" Interpretation: Work_Experience explains only", round(r_squared * 100, 2), "% of the variation in Income\n")
## Interpretation: Work_Experience explains only 0.07 % of the variation in Income
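In simple linear regression R² is just the square of the Pearson correlation, so the value above can be cross-checked directly; a minimal sketch:

# With a single predictor, R² equals the squared Pearson correlation
cor(data$Work_Experience, data$Income)^2  # (-0.0265)^2 ≈ 0.0007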
# MODEL VISUALIZATION
ggplot(data, aes(x = Work_Experience, y = Income)) +
  geom_point(color = "blue", size = 3) +
  geom_smooth(method = "lm", se = TRUE, color = "red", fill = "pink") +
  labs(title = "Fitted Linear Regression Line",
       subtitle = paste("Y =", round(intercept, 2), "+", round(slope, 2), "X"),
       x = "Work_Experience", y = "Income") +
  theme_minimal()
## `geom_smooth()` using formula = 'y ~ x'
# PREDICTION
new_data <- data.frame(Work_Experience = c(17, 38))
prediction <- predict(model, newdata = new_data, interval = "confidence")
cat("\n=== PREDICTION ===\n")
##
## === PREDICTION ===
cat("For Work_Experience = 17, predicted Income =", round(prediction[1, "fit"], 2), "\n")
## For Work_Experience = 17, predicted Income = 839785.9
cat("For Work_Experience = 38, predicted Income =", round(prediction[2, "fit"], 2), "\n")
## For Work_Experience = 38, predicted Income = 770762.4
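These are confidence intervals for the mean Income at the given experience levels. For predicting an individual's Income, the (much wider) prediction interval is usually the relevant one; a minimal sketch:

# Prediction intervals for individual observations (wider than confidence intervals)
predict(model, newdata = new_data, interval = "prediction", level = 0.95)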
# DIAGNOSTIC PLOTS
par(mfrow = c(2, 2))
plot(model, which = 1:4)
par(mfrow = c(1, 1))
# FULL SUMMARY
cat("\n=== ANALYSIS SUMMARY ===\n")
##
## === ANALYSIS SUMMARY ===
cat("1. Model: Income = β0 + β1*Work_Experience + ε\n")
## 1. Model: Income = β0 + β1*Work_Experience + ε
cat("2. Estimate: Y =", round(intercept, 3), "+", round(slope, 3), "* X\n")
## 2. Estimate: Y = 895662 + -3286.83 * X
cat("3. R² =", round(r_squared, 4), "(", round(r_squared*100, 1), "%)\n")
## 3. R² = 7e-04 ( 0.1 %)
cat("4. F test (overall model): F statistic =",
    round(summary_model$fstatistic[1], 4), "\n")
## 4. F test (overall model): F statistic = 3.5076
cat("5. Assumptions:\n")
## 5. Assumptions:
cat(" - Normality: p =", round(shapiro_test$p.value, 4), "\n")
## - Normality: p = 0
cat(" - Homoscedasticity: p =", round(bp_test$p.value, 4), "\n")
## - Homoscedasticity: p = 0.1817
cat(" - Autocorrelation: p =", round(dw_test$p.value, 4), "\n")
## - Autocorrelation: p = 0.9087
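Item 4 reports the F statistic itself; the corresponding p-value (0.06115 in the model summary) can be recovered from the stored statistic and its degrees of freedom; a minimal sketch:

# p-value of the overall F test from the stored statistic and degrees of freedom
f_stat <- summary_model$fstatistic
pf(f_stat[1], f_stat[2], f_stat[3], lower.tail = FALSE)  # ≈ 0.0611, same as the slope t test here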
# Save results
hasil <- list(
  model = model,
  coefficients = coef(model),
  r_squared = r_squared,
  assumptions = list(
    normality = shapiro_test$p.value,
    homoscedasticity = bp_test$p.value,
    autocorrelation = dw_test$p.value
  ),
  confidence_intervals = conf_int
)
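If the `hasil` results list needs to be reused in a later session, it can be written to disk; a minimal sketch, with the file name purely illustrative:

# Save the results list for later reuse (file name is illustrative)
saveRDS(hasil, "regression_results.rds")
# hasil <- readRDS("regression_results.rds")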