library(tidyverse)
## Warning: package 'tidyverse' was built under R version 4.4.3
## Warning: package 'ggplot2' was built under R version 4.4.3
## Warning: package 'lubridate' was built under R version 4.4.3
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ forcats   1.0.0     ✔ readr     2.1.5
## ✔ ggplot2   3.5.2     ✔ stringr   1.5.1
## ✔ lubridate 1.9.4     ✔ tibble    3.2.1
## ✔ purrr     1.0.2     ✔ tidyr     1.3.1
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag()    masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(caret)
## Warning: package 'caret' was built under R version 4.4.3
## Loading required package: lattice
## 
## Attaching package: 'caret'
## The following object is masked from 'package:purrr':
## 
##     lift
library(e1071)
library(rpart)
library(randomForest)
## Warning: package 'randomForest' was built under R version 4.4.3
## randomForest 4.7-1.2
## Type rfNews() to see new features/changes/bug fixes.
## 
## Attaching package: 'randomForest'
## The following object is masked from 'package:ggplot2':
## 
##     margin
## The following object is masked from 'package:dplyr':
## 
##     combine

Problem 1: Data Cleaning and Exploration (30%)

1. Identify and Handle Missing Values, Outliers, and Inconsistencies

# Read the data (readxl provides read_excel)
library(readxl)
file_path <- "C:/Users/USER/Downloads/kualitasair.xlsx"
data <- read_excel(file_path, sheet = 1)

# Inspect the data structure
str(data)
## tibble [300 × 7] (S3: tbl_df/tbl/data.frame)
##  $ Lokasi: chr [1:300] "S1" "S2" "S3" "S4" ...
##  $ pH    : num [1:300] 7.69 6.72 7.18 7.32 7.2 ...
##  $ DO    : num [1:300] NA 5.72 4.89 6.13 7.79 ...
##  $ BOD   : num [1:300] 1.71 1.44 2.73 3.14 1.18 ...
##  $ TSS   : num [1:300] 43.1 44.3 NA 41 48.1 ...
##  $ Suhu  : num [1:300] 26.8 27.7 26 29.7 26.4 ...
##  $ Status: chr [1:300] "Tercemar ringan" "Tercemar ringan" "Tercemar ringan" "Tercemar ringan" ...
summary(data)
##     Lokasi                pH              DO             BOD        
##  Length:300         Min.   :5.503   Min.   :2.982   Min.   :0.3026  
##  Class :character   1st Qu.:6.670   1st Qu.:5.375   1st Qu.:2.3573  
##  Mode  :character   Median :6.988   Median :5.991   Median :3.0661  
##                     Mean   :6.989   Mean   :5.976   Mean   :3.0005  
##                     3rd Qu.:7.318   3rd Qu.:6.688   3rd Qu.:3.5781  
##                     Max.   :8.351   Max.   :9.229   Max.   :5.7962  
##                                     NA's   :23      NA's   :22      
##       TSS             Suhu          Status         
##  Min.   :24.65   Min.   :22.77   Length:300        
##  1st Qu.:43.73   1st Qu.:26.62   Class :character  
##  Median :49.52   Median :28.01   Mode  :character  
##  Mean   :49.70   Mean   :28.31                     
##  3rd Qu.:56.44   3rd Qu.:29.46                     
##  Max.   :76.34   Max.   :90.00                     
##  NA's   :24
# --- 1a. Check missing values ---
colSums(is.na(data))
## Lokasi     pH     DO    BOD    TSS   Suhu Status 
##      0      0     23     22     24      0      0
# Replace missing values in the numeric columns with the column median
num_cols <- c("pH", "DO", "BOD", "TSS", "Suhu")
for (col in num_cols) {
  data[[col]][is.na(data[[col]])] <- median(data[[col]], na.rm = TRUE)
}

# --- 1b. Detect outliers with the IQR rule ---
for (col in num_cols) {
  Q1 <- quantile(data[[col]], 0.25, na.rm = TRUE)
  Q3 <- quantile(data[[col]], 0.75, na.rm = TRUE)
  IQR <- Q3 - Q1
  batas_bawah <- Q1 - 1.5 * IQR
  batas_atas <- Q3 + 1.5 * IQR
  
  # Replace IQR outliers directly with the column median
  data[[col]][data[[col]] < batas_bawah | data[[col]] > batas_atas] <- median(data[[col]], na.rm = TRUE)
}

# --- 1c. Check for inconsistent category labels ---
unique(data$Status)
## [1] "Tercemar ringan" "baik"            "BAIK"            "Baik"           
## [5] "tercemar ringan" "Tercemar Ringan" "Tercemar berat"

Explanation: Missing values in the numeric columns (DO, BOD, TSS) are imputed with the column median, values outside the 1.5 × IQR fences (for example the Suhu value of 90) are likewise replaced with the median, and the Status column turns out to contain several inconsistent spellings of the same three categories, which are standardized in the next step.
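For reference, a minimal tidyverse sketch of the same cleaning step, assuming the data and num_cols objects defined above; like the loop, it replaces IQR outliers with the column median rather than capping them at the fences, and data_clean is a hypothetical name for the result.

# Sketch: dplyr alternative to the imputation/outlier loop above
# (assumes `data` and `num_cols`; outliers are replaced with the median)
clean_numeric <- function(x) {
  x[is.na(x)] <- median(x, na.rm = TRUE)                     # impute NA with the median
  q1  <- quantile(x, 0.25)
  q3  <- quantile(x, 0.75)
  iqr <- q3 - q1
  x[x < q1 - 1.5 * iqr | x > q3 + 1.5 * iqr] <- median(x)    # replace IQR outliers
  x
}

data_clean <- data %>%
  mutate(across(all_of(num_cols), clean_numeric))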

2. Standardizing the Status Category Labels

# ======= Final (robust) code to standardize the "Status" column =======

# 1. Load packages
library(readxl)
library(dplyr)
library(stringr)

# 2. Read the file (make sure the working directory or path is correct).
#    Note: this re-reads the raw file, so the imputation from section 1 is discarded here.
data <- read_excel("C:/Users/USER/Downloads/kualitasair.xlsx")

# 3. Find the column whose name contains "status" (case-insensitive)
status_idx <- grep("status", tolower(names(data)))
if(length(status_idx) == 0){
  stop("Kolom 'status' tidak ditemukan di dataset. Cek nama kolom pada data kamu: ", paste(names(data), collapse = ", "))
}
# if more than one matches, take the first
status_col <- names(data)[status_idx[1]]
message("Kolom Status yang digunakan: '", status_col, "'")
## Kolom Status yang digunakan: 'Status'
# 4. Keep a copy of the original column
data$Status_asli <- data[[status_col]]

# 5. Extract the status values as a character vector (safe for factor/numeric/character input)
s <- data[[status_col]]
# if it is a factor, convert to character
if(is.factor(s)) s <- as.character(s)
# convert numeric values to character as well (e.g. 1 -> "1")
s <- as.character(s)

# 6. Normalize the strings: lowercase, trim leading/trailing spaces, turn common separators into spaces, drop non-alphanumerics except spaces, squish repeated spaces
s_clean <- tolower(s)
s_clean <- trimws(s_clean)
s_clean <- str_replace_all(s_clean, "[=,_\\-\\/]+", " ")   # ganti =,_-/ jadi spasi
s_clean <- str_replace_all(s_clean, "[^a-z0-9 ]", "")      # hapus char selain a-z,0-9,spasi
s_clean <- str_squish(s_clean)

# 7. Create the result vector (all NA at first)
status_standard <- rep(NA_character_, length(s_clean))

# 8. Explicit mapping for numeric codes and common variants
# Handle "1", "2", "3" and their variations
ix1 <- which(!is.na(s_clean) & s_clean %in% c("1","1.0","1.00"))
ix2 <- which(!is.na(s_clean) & s_clean %in% c("2","2.0","2.00"))
ix3 <- which(!is.na(s_clean) & s_clean %in% c("3","3.0","3.00"))
status_standard[ix1] <- "Baik"
status_standard[ix2] <- "Tercemar Ringan"
status_standard[ix3] <- "Tercemar Berat"

# 9. Keyword-based mapping (safe even if a value was already set in step 8)
ix_baik   <- which(!is.na(s_clean) & str_detect(s_clean, "\\bbaik\\b"))
ix_ringan <- which(!is.na(s_clean) & str_detect(s_clean, "ringan"))
ix_berat  <- which(!is.na(s_clean) & str_detect(s_clean, "berat"))

status_standard[ix_baik]   <- "Baik"
status_standard[ix_ringan] <- "Tercemar Ringan"
status_standard[ix_berat]  <- "Tercemar Berat"

# 10. Special spelling variants (words joined without a space, e.g. "tercemarringan")
ix_special_r <- which(!is.na(s_clean) & str_detect(s_clean, "tercemar.*ringan|tercemarringan|ringan"))
ix_special_b <- which(!is.na(s_clean) & str_detect(s_clean, "tercemar.*berat|tercemarberat|berat"))

status_standard[ix_special_r] <- "Tercemar Ringan"
status_standard[ix_special_b] <- "Tercemar Berat"

# 11. For rows that are still NA but contain a digit, try to extract 1/2/3
ix_numeric_like <- which(is.na(status_standard) & !is.na(s_clean) & str_detect(s_clean, "[0-9]"))
if(length(ix_numeric_like) > 0){
  nums_extracted <- str_extract(s_clean[ix_numeric_like], "[123]")
  status_standard[ix_numeric_like[which(nums_extracted == "1")]] <- "Baik"
  status_standard[ix_numeric_like[which(nums_extracted == "2")]] <- "Tercemar Ringan"
  status_standard[ix_numeric_like[which(nums_extracted == "3")]] <- "Tercemar Berat"
}

# 12. Store the result back in the data frame as the new 'Status' column
data$Status <- status_standard

# 13. Convert to a factor with a logical ordering of levels
data$Status <- factor(data$Status,
                      levels = c("Baik", "Tercemar Ringan", "Tercemar Berat"))

# 14. Check the result: frequencies and any rows that failed to map (NA)
cat("\nCategory frequencies after standardization:\n")
## 
## Category frequencies after standardization:
print(table(data$Status, useNA = "ifany"))
## 
##            Baik Tercemar Ringan  Tercemar Berat 
##              72             221               7
na_rows <- which(is.na(data$Status))
if(length(na_rows) > 0){
  cat("\nBaris dengan Status yang TIDAK ter-standarisasi (NA):\n")
  print(data[na_rows, c(status_col, "Status_asli")])
} else {
  cat("\nSemua baris berhasil ter-standarisasi.\n")
}
## 
## Semua baris berhasil ter-standarisasi.
# 15. (Optional) Show a few rows comparing the original and standardized Status
cat("\nSample comparison of original vs standardized Status:\n")
## 
## Sample comparison of original vs standardized Status:
print(head(data[, c("Status_asli", "Status")], 20))
## # A tibble: 20 × 2
##    Status_asli     Status         
##    <chr>           <fct>          
##  1 Tercemar ringan Tercemar Ringan
##  2 Tercemar ringan Tercemar Ringan
##  3 Tercemar ringan Tercemar Ringan
##  4 Tercemar ringan Tercemar Ringan
##  5 baik            Baik           
##  6 Tercemar ringan Tercemar Ringan
##  7 Tercemar ringan Tercemar Ringan
##  8 Tercemar ringan Tercemar Ringan
##  9 Tercemar ringan Tercemar Ringan
## 10 BAIK            Baik           
## 11 Tercemar ringan Tercemar Ringan
## 12 Tercemar ringan Tercemar Ringan
## 13 Baik            Baik           
## 14 Tercemar ringan Tercemar Ringan
## 15 Tercemar ringan Tercemar Ringan
## 16 Baik            Baik           
## 17 Tercemar ringan Tercemar Ringan
## 18 Baik            Baik           
## 19 Tercemar ringan Tercemar Ringan
## 20 tercemar ringan Tercemar Ringan
# 16. (Optional) Save the cleaned result to a new file
write.csv(data, "kualitasair_status_standardized.csv", row.names = FALSE)
cat("\nThe file 'kualitasair_status_standardized.csv' has been saved to the working directory.\n")
## 
## The file 'kualitasair_status_standardized.csv' has been saved to the working directory.

Brief Explanation

All spelling variants (e.g. baik=1, Baik, 1, tercemarringan, 3, Tercemar berat) are cleaned and mapped to three categories: Baik, Tercemar Ringan, and Tercemar Berat.

Any other value that does not match these rules is left as NA so it can easily be reviewed.
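A more compact alternative sketch using dplyr::case_when(), assuming the normalized vector s_clean from step 6; the rules are checked in order, so "berat" and "ringan" take precedence over "baik", and status_standard2 is a hypothetical name for the result.

# Sketch: compact mapping with case_when() (assumes `s_clean` from step 6)
status_standard2 <- case_when(
  str_detect(s_clean, "berat")  | s_clean == "3" ~ "Tercemar Berat",
  str_detect(s_clean, "ringan") | s_clean == "2" ~ "Tercemar Ringan",
  str_detect(s_clean, "baik")   | s_clean == "1" ~ "Baik",
  TRUE ~ NA_character_                             # anything else stays NA for review
)
table(status_standard2, useNA = "ifany")           # compare with the table above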

3. Descriptive Summary Statistics After Cleaning

library(ggplot2)

# Numeric summary (note: the raw file was re-read in section 2, so NA counts reappear here)
summary_stats <- data %>%
  select(all_of(num_cols)) %>%
  summary()
print(summary_stats)
##        pH              DO             BOD              TSS       
##  Min.   :5.503   Min.   :2.982   Min.   :0.3026   Min.   :24.65  
##  1st Qu.:6.670   1st Qu.:5.375   1st Qu.:2.3573   1st Qu.:43.73  
##  Median :6.988   Median :5.991   Median :3.0661   Median :49.52  
##  Mean   :6.989   Mean   :5.976   Mean   :3.0005   Mean   :49.70  
##  3rd Qu.:7.318   3rd Qu.:6.688   3rd Qu.:3.5781   3rd Qu.:56.44  
##  Max.   :8.351   Max.   :9.229   Max.   :5.7962   Max.   :76.34  
##                  NA's   :23      NA's   :22       NA's   :24     
##       Suhu      
##  Min.   :22.77  
##  1st Qu.:26.62  
##  Median :28.01  
##  Mean   :28.31  
##  3rd Qu.:29.46  
##  Max.   :90.00  
## 
# Fuller descriptive statistics (mean, sd, median, min, max)
deskriptif <- data %>%
  summarise(across(all_of(num_cols),
                   list(mean = ~mean(. , na.rm=TRUE),
                        sd   = ~sd(. , na.rm=TRUE),
                        median = ~median(. , na.rm=TRUE),
                        min = ~min(. , na.rm=TRUE),
                        max = ~max(. , na.rm=TRUE)),
                   .names = "{col}_{fn}"))
# Display transposed for readability
t(deskriptif)
##                   [,1]
## pH_mean      6.9891110
## pH_sd        0.4936295
## pH_median    6.9880500
## pH_min       5.5035000
## pH_max       8.3509000
## DO_mean      5.9762939
## DO_sd        0.9996836
## DO_median    5.9909000
## DO_min       2.9821000
## DO_max       9.2291000
## BOD_mean     3.0005223
## BOD_sd       0.8461546
## BOD_median   3.0661000
## BOD_min      0.3026000
## BOD_max      5.7962000
## TSS_mean    49.6980971
## TSS_sd       9.6357424
## TSS_median  49.5220500
## TSS_min     24.6492000
## TSS_max     76.3371000
## Suhu_mean   28.3136913
## Suhu_sd      4.1217784
## Suhu_median 28.0148500
## Suhu_min    22.7727000
## Suhu_max    90.0000000
# Distribution of the standardized Status categories
table(data$Status, useNA = "ifany")
## 
##            Baik Tercemar Ringan  Tercemar Berat 
##              72             221               7
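To round out the exploration, a sketch of per-class boxplots for each numeric variable, assuming data, num_cols, and the standardized Status factor from above; rows that still contain NA are dropped by ggplot with a warning.

# Sketch: boxplots of each numeric variable by Status class
data %>%
  pivot_longer(all_of(num_cols), names_to = "Variable", values_to = "Value") %>%
  ggplot(aes(x = Status, y = Value, fill = Status)) +
  geom_boxplot() +
  facet_wrap(~ Variable, scales = "free_y") +
  theme_minimal()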

Problem 2: Classifying Water Quality Status (35%)

1. Use the Numeric Variables (pH, DO, BOD, TSS, Suhu) to Classify Status

# Make sure the required library is loaded
library(dplyr)

# Select the numeric predictors and the target variable
num_cols <- c("pH", "DO", "BOD", "TSS", "Suhu")

# Make sure Status is a factor (categorical)
data$Status <- as.factor(data$Status)

# Check the structure of the data
str(data[, c(num_cols, "Status")])
## tibble [300 × 6] (S3: tbl_df/tbl/data.frame)
##  $ pH    : num [1:300] 7.69 6.72 7.18 7.32 7.2 ...
##  $ DO    : num [1:300] NA 5.72 4.89 6.13 7.79 ...
##  $ BOD   : num [1:300] 1.71 1.44 2.73 3.14 1.18 ...
##  $ TSS   : num [1:300] 43.1 44.3 NA 41 48.1 ...
##  $ Suhu  : num [1:300] 26.8 27.7 26 29.7 26.4 ...
##  $ Status: Factor w/ 3 levels "Baik","Tercemar Ringan",..: 2 2 2 2 1 2 2 2 2 1 ...

2. Split the Data into Training and Testing Sets

library(caret)

set.seed(123)  # so the split is reproducible on every run
train_index <- createDataPartition(data$Status, p = 0.8, list = FALSE)
train_data <- data[train_index, ]
test_data  <- data[-train_index, ]

# Check the number of rows
cat("Number of training rows:", nrow(train_data), "\n")
## Number of training rows: 241
cat("Number of testing rows :", nrow(test_data), "\n")
## Number of testing rows : 59

Explanation: The data is split 80% for training (fitting the models) and 20% for testing (measuring model accuracy); createDataPartition() stratifies the split by Status, which the sketch below verifies by comparing class proportions.
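A quick sanity-check sketch, assuming train_data and test_data from above, that the stratified split keeps the class proportions of Status similar in both sets:

# Sketch: compare class proportions in the training and testing sets
round(prop.table(table(train_data$Status)), 3)
round(prop.table(table(test_data$Status)), 3)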

3. Build Classification Models with SVM, Decision Tree, and Random Forest

library(e1071)        # for SVM
library(rpart)        # for Decision Tree
library(randomForest) # for Random Forest
library(dplyr)

# 1. Make sure the training data contains no NA values
train_data <- na.omit(train_data)

# 2. MODEL 1: SVM (Support Vector Machine)
svm_model <- svm(Status ~ pH + DO + BOD + TSS + Suhu,
                 data = train_data)

# 3. MODEL 2: Decision Tree
tree_model <- rpart(Status ~ pH + DO + BOD + TSS + Suhu,
                    data = train_data,
                    method = "class")

# 4. MODEL 3: Random Forest
rf_model <- randomForest(Status ~ pH + DO + BOD + TSS + Suhu,
                         data = train_data,
                         ntree = 100,
                         importance = TRUE,
                         na.action = na.omit)   # important: drop any rows that still contain NA

# 5. Confirm that the models were built
cat("The SVM, Decision Tree, and Random Forest models were built without errors\n")
## The SVM, Decision Tree, and Random Forest models were built without errors

Three different models are built to predict Status from the five numeric variables: an SVM (e1071), a decision tree (rpart), and a random forest (randomForest). For a more stable accuracy estimate than a single hold-out split, see the cross-validation sketch below.
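As an optional, hedged sketch, 10-fold cross-validation with caret::train can complement the single 80/20 split; "rpart" and "rf" are caret's built-in method codes for the decision tree and random forest, train_data (without NAs) is assumed from above, and the rare "Tercemar Berat" class may trigger warnings in some folds.

# Sketch: 10-fold cross-validation on the training data
ctrl <- trainControl(method = "cv", number = 10)

set.seed(123)
cv_tree <- train(Status ~ pH + DO + BOD + TSS + Suhu, data = train_data,
                 method = "rpart", trControl = ctrl)
set.seed(123)
cv_rf   <- train(Status ~ pH + DO + BOD + TSS + Suhu, data = train_data,
                 method = "rf", trControl = ctrl)

cv_tree$results   # accuracy per complexity parameter
cv_rf$results     # accuracy per mtry value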

4. Evaluate the Results with Confusion Matrices and Interpret the Accuracy

library(caret)

# 1. Make sure the testing data contains no NA values
test_data <- na.omit(test_data)

# 2. Predict with each model
svm_pred  <- predict(svm_model,  newdata = test_data)
tree_pred <- predict(tree_model, newdata = test_data, type = "class")
rf_pred   <- predict(rf_model,   newdata = test_data)

# 3. Build a confusion matrix for each model
conf_svm  <- confusionMatrix(svm_pred,  test_data$Status)
conf_tree <- confusionMatrix(tree_pred, test_data$Status)
conf_rf   <- confusionMatrix(rf_pred,   test_data$Status)

# 4. Report each model's accuracy
cat("\n================ MODEL EVALUATION RESULTS ================\n")
## 
## ================ MODEL EVALUATION RESULTS ================
cat("SVM accuracy           :", round(conf_svm$overall["Accuracy"], 3), "\n")
## SVM accuracy           : 0.878
cat("Decision Tree accuracy :", round(conf_tree$overall["Accuracy"], 3), "\n")
## Decision Tree accuracy : 0.98
cat("Random Forest accuracy :", round(conf_rf$overall["Accuracy"], 3), "\n")
## Random Forest accuracy : 0.959
# 5. Compare all accuracies in one table
akurasi <- data.frame(
  Model = c("SVM", "Decision Tree", "Random Forest"),
  Akurasi = c(
    round(conf_svm$overall["Accuracy"], 3),
    round(conf_tree$overall["Accuracy"], 3),
    round(conf_rf$overall["Accuracy"], 3)
  )
)
cat("\n=== PERBANDINGAN AKURASI ===\n")
## 
## === PERBANDINGAN AKURASI ===
print(akurasi)
##           Model Akurasi
## 1           SVM   0.878
## 2 Decision Tree   0.980
## 3 Random Forest   0.959
# 6. (Optional) Visualize the accuracies with a bar chart
library(ggplot2)
ggplot(akurasi, aes(x = Model, y = Akurasi, fill = Model)) +
  geom_bar(stat = "identity") +
  labs(title = "Perbandingan Akurasi Model Klasifikasi",
       x = "Model", y = "Akurasi") +
  theme_minimal()

# 7. (Optional) Inspect the most influential variables (Random Forest)
cat("\n=== VARIABLE IMPORTANCE (Random Forest) ===\n")
## 
## === VARIABLE IMPORTANCE (Random Forest) ===
print(importance(rf_model))
##           Baik Tercemar Ringan Tercemar Berat MeanDecreaseAccuracy
## pH   -0.197871      -1.4574083      -1.005038           -1.2617423
## DO   26.986501      26.5262552       3.673166           32.0248345
## BOD  29.602709      22.9660435       6.143374           30.1575572
## TSS  -0.251152      -0.4314936      -1.005038           -0.4276267
## Suhu -3.203691       0.1645667      -1.005038           -1.5030941
##      MeanDecreaseGini
## pH           2.441880
## DO          28.246648
## BOD         31.504272
## TSS          3.011060
## Suhu         3.063888
varImpPlot(rf_model)
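Because the "Tercemar Berat" class is rare, overall accuracy can hide class-level errors. A short sketch pulling the per-class metrics that caret::confusionMatrix() already computed for the best-performing model here (the decision tree, via conf_tree from above):

# Sketch: per-class metrics for the decision tree
conf_tree$table                                                          # raw confusion matrix
round(conf_tree$byClass[, c("Sensitivity", "Specificity", "Balanced Accuracy")], 3)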

Problem 3: Predicting the DO Variable (35%)

1. Use Linear Regression and Spline Regression to Predict DO from pH, BOD, TSS, and Suhu

# Make sure the required libraries are loaded
library(dplyr)
library(splines)

# Select the predictor variables and the target
data_reg <- data %>% select(DO, pH, BOD, TSS, Suhu) %>% na.omit()

# --- Model 1: Linear regression
lm_model <- lm(DO ~ pH + BOD + TSS + Suhu, data = data_reg)
summary(lm_model)
## 
## Call:
## lm(formula = DO ~ pH + BOD + TSS + Suhu, data = data_reg)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.32935 -0.61312 -0.00914  0.68897  2.99811 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  5.8945131  1.0856565   5.429 1.41e-07 ***
## pH          -0.0336180  0.1306002  -0.257    0.797    
## BOD          0.1057450  0.0758015   1.395    0.164    
## TSS          0.0004416  0.0065840   0.067    0.947    
## Suhu        -0.0002627  0.0142544  -0.018    0.985    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9896 on 235 degrees of freedom
## Multiple R-squared:  0.008661,   Adjusted R-squared:  -0.008213 
## F-statistic: 0.5133 on 4 and 235 DF,  p-value: 0.7261
# --- Model 2: Spline regression
# Use bs() (B-spline basis) for variables whose effect may be nonlinear
spline_model <- lm(DO ~ bs(pH, df = 4) + bs(BOD, df = 4) + bs(TSS, df = 4) + bs(Suhu, df = 4),
                   data = data_reg)
summary(spline_model)
## 
## Call:
## lm(formula = DO ~ bs(pH, df = 4) + bs(BOD, df = 4) + bs(TSS, 
##     df = 4) + bs(Suhu, df = 4), data = data_reg)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.15767 -0.59536 -0.01192  0.60937  2.48092 
## 
## Coefficients:
##                   Estimate Std. Error t value Pr(>|t|)    
## (Intercept)         5.8147     1.3297   4.373 1.88e-05 ***
## bs(pH, df = 4)1    -0.2679     0.8880  -0.302  0.76322    
## bs(pH, df = 4)2     0.4239     0.6666   0.636  0.52551    
## bs(pH, df = 4)3    -0.6445     0.8322  -0.775  0.43945    
## bs(pH, df = 4)4     0.7143     0.8222   0.869  0.38591    
## bs(BOD, df = 4)1    1.0841     1.2048   0.900  0.36917    
## bs(BOD, df = 4)2    1.1675     0.8285   1.409  0.16018    
## bs(BOD, df = 4)3    0.4848     1.1159   0.434  0.66440    
## bs(BOD, df = 4)4    3.3643     1.0689   3.147  0.00187 ** 
## bs(TSS, df = 4)1   -0.3672     0.8751  -0.420  0.67517    
## bs(TSS, df = 4)2    0.2358     0.5905   0.399  0.69007    
## bs(TSS, df = 4)3   -0.2787     0.8114  -0.344  0.73151    
## bs(TSS, df = 4)4   -0.3144     0.7775  -0.404  0.68634    
## bs(Suhu, df = 4)1  -0.4519     0.8026  -0.563  0.57393    
## bs(Suhu, df = 4)2  -3.4150     2.6836  -1.273  0.20450    
## bs(Suhu, df = 4)3  24.2020    22.5687   1.072  0.28471    
## bs(Suhu, df = 4)4  -0.3891     1.1494  -0.339  0.73530    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9786 on 223 degrees of freedom
## Multiple R-squared:  0.08012,    Adjusted R-squared:  0.01412 
## F-statistic: 1.214 on 16 and 223 DF,  p-value: 0.2584
cat("\nModel Regresi Linear dan Regresi Spline berhasil dibangun ✅\n")
## 
## Model Regresi Linear dan Regresi Spline berhasil dibangun ✅

2. Evaluate Model Performance (R², MSE, RMSE)

# Predictions from both models (on the full data_reg, i.e. in-sample)
pred_lm <- predict(lm_model, newdata = data_reg)
pred_spline <- predict(spline_model, newdata = data_reg)

# Compute R², MSE, and RMSE for each model
R2_lm <- summary(lm_model)$r.squared
MSE_lm <- mean((data_reg$DO - pred_lm)^2)
RMSE_lm <- sqrt(MSE_lm)

R2_spline <- summary(spline_model)$r.squared
MSE_spline <- mean((data_reg$DO - pred_spline)^2)
RMSE_spline <- sqrt(MSE_spline)

# Show the comparison
hasil_eval <- data.frame(
  Model = c("Regresi Linear", "Regresi Spline"),
  R2 = c(R2_lm, R2_spline),
  MSE = c(MSE_lm, MSE_spline),
  RMSE = c(RMSE_lm, RMSE_spline)
)
print(hasil_eval)
##            Model          R2       MSE      RMSE
## 1 Regresi Linear 0.008660591 0.9589855 0.9792781
## 2 Regresi Spline 0.080123760 0.8898547 0.9433211
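Note that the R², MSE, and RMSE above are computed on the same rows used to fit the models. As a hedged sketch, a simple 80/20 hold-out split of data_reg gives a less optimistic RMSE for both models; lm_fit, spline_fit, and rmse are illustrative names, and predicting splines outside the training range may produce warnings.

# Sketch: hold-out evaluation of both regression models (assumes data_reg)
set.seed(123)
idx <- sample(seq_len(nrow(data_reg)), size = floor(0.8 * nrow(data_reg)))
reg_train <- data_reg[idx, ]
reg_test  <- data_reg[-idx, ]

lm_fit     <- lm(DO ~ pH + BOD + TSS + Suhu, data = reg_train)
spline_fit <- lm(DO ~ bs(pH, df = 4) + bs(BOD, df = 4) + bs(TSS, df = 4) + bs(Suhu, df = 4),
                 data = reg_train)

rmse <- function(obs, pred) sqrt(mean((obs - pred)^2))
rmse(reg_test$DO, predict(lm_fit,     newdata = reg_test))     # hold-out RMSE, linear
rmse(reg_test$DO, predict(spline_fit, newdata = reg_test))     # hold-out RMSE, spline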

3. Visualize Predicted vs Actual Values

library(ggplot2)

# Combine the predictions with the actual values
visual_df <- data.frame(
  DO_Aktual = data_reg$DO,
  Pred_LM = pred_lm,
  Pred_Spline = pred_spline
)

# Plot for the linear regression
ggplot(visual_df, aes(x = DO_Aktual, y = Pred_LM)) +
  geom_point(color = "steelblue") +
  geom_abline(intercept = 0, slope = 1, color = "red", linetype = "dashed") +
  labs(title = "Prediksi vs Aktual (Regresi Linear)",
       x = "DO Aktual", y = "Prediksi DO (Linear)") +
  theme_minimal()

# Plot for the spline regression
ggplot(visual_df, aes(x = DO_Aktual, y = Pred_Spline)) +
  geom_point(color = "darkgreen") +
  geom_abline(intercept = 0, slope = 1, color = "red", linetype = "dashed") +
  labs(title = "Prediksi vs Aktual (Regresi Spline)",
       x = "DO Aktual", y = "Prediksi DO (Spline)") +
  theme_minimal()

* The dashed red diagonal marks perfect prediction (y = x); the combined sketch below puts both models side by side.
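The two scatter plots can also be combined into a single faceted figure; this is a sketch assuming the visual_df data frame built above (pivot_longer comes from tidyr, loaded with the tidyverse).

# Sketch: both models side by side in one faceted plot
visual_df %>%
  pivot_longer(c(Pred_LM, Pred_Spline), names_to = "Model", values_to = "Prediction") %>%
  ggplot(aes(x = DO_Aktual, y = Prediction)) +
  geom_point(alpha = 0.6, color = "steelblue") +
  geom_abline(intercept = 0, slope = 1, color = "red", linetype = "dashed") +
  facet_wrap(~ Model) +
  labs(title = "Predicted vs Actual DO by Model",
       x = "Actual DO", y = "Predicted DO") +
  theme_minimal()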

4. Explain Which Variables Most Influence DO

# 1. Get the summary of the linear regression model
lm_summary <- summary(lm_model)

# 2. Extract the coefficients as a data frame
coef_table <- as.data.frame(lm_summary$coefficients)

# 3. Give the columns tidy names
colnames(coef_table) <- c("Estimate", "Std_Error", "t_value", "p_value")

# 4. Show all coefficients
cat("\n=== LINEAR REGRESSION COEFFICIENTS ===\n")
## 
## === LINEAR REGRESSION COEFFICIENTS ===
print(coef_table)
##                  Estimate   Std_Error     t_value      p_value
## (Intercept)  5.8945130643 1.085656494  5.42944578 1.407218e-07
## pH          -0.0336179978 0.130600222 -0.25741149 7.970864e-01
## BOD          0.1057450386 0.075801526  1.39502519 1.643253e-01
## TSS          0.0004415990 0.006584033  0.06707120 9.465820e-01
## Suhu        -0.0002626552 0.014254374 -0.01842629 9.853144e-01
# 5. Filter variables that are significant at the 5% level (p-value < 0.05)
signif_vars <- coef_table %>%
  filter(p_value < 0.05)

cat("\n=== VARIABEL YANG PALING BERPENGARUH (p < 0.05) ===\n")
## 
## === VARIABEL YANG PALING BERPENGARUH (p < 0.05) ===
if (nrow(signif_vars) > 0) {
  print(signif_vars)
} else {
  cat("Tidak ada variabel yang signifikan pada taraf 5%.\n")
}
##             Estimate Std_Error  t_value      p_value
## (Intercept) 5.894513  1.085656 5.429446 1.407218e-07
# 6. Rank terms by the absolute value of their estimate
coef_table$abs_estimate <- abs(coef_table$Estimate)
top_vars <- coef_table %>%
  arrange(desc(abs_estimate)) %>%
  head(3)

cat("\n=== 3 VARIABEL DENGAN PENGARUH TERBESAR (berdasarkan nilai koefisien) ===\n")
## 
## === 3 VARIABEL DENGAN PENGARUH TERBESAR (berdasarkan nilai koefisien) ===
print(top_vars)
##              Estimate  Std_Error    t_value      p_value abs_estimate
## (Intercept)  5.894513 1.08565649  5.4294458 1.407218e-07     5.894513
## BOD          0.105745 0.07580153  1.3950252 1.643253e-01     0.105745
## pH          -0.033618 0.13060022 -0.2574115 7.970864e-01     0.033618

Interpretation: None of the four predictors (pH, BOD, TSS, Suhu) is statistically significant at the 5% level; only the intercept passes the p < 0.05 filter above, and it is not a predictor. The linear model explains almost none of the variation in DO (R² of about 0.009; the spline model reaches only about 0.08). Ranked by absolute coefficient, BOD has the largest estimated effect (about +0.106), followed by pH (about -0.034), but raw coefficients depend on each variable's scale, so the comparison below uses standardized predictors.
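Because the raw coefficients depend on each predictor's scale, a fairer comparison of relative influence is sketched below: refit the linear model on standardized (z-scored) predictors, assuming data_reg from above; data_std and lm_std are illustrative names.

# Sketch: linear model on standardized predictors; coefficients are now comparable
data_std <- data_reg %>%
  mutate(across(c(pH, BOD, TSS, Suhu), ~ as.numeric(scale(.))))

lm_std <- lm(DO ~ pH + BOD + TSS + Suhu, data = data_std)
round(summary(lm_std)$coefficients, 4)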