# ------------------------------
# 1) Load libraries
# ------------------------------
library(dplyr)  # not strictly needed below, but kept since the original workflow used it

# ------------------------------
# 2) Configuration
# ------------------------------

# Base URL where the AZMET station .txt files live

base_url <- "https://azmet.arizona.edu/azmet/data"

# Maricopa station is 06 (files like 0602rd.txt, 0603rd.txt, ..., 0622rd.txt)

station_code <- "06"

# Years of interest

years <- 2002:2022

# Local paths (backslashes must be escaped in R strings)
temps_dir  <- "G:\\Other computers\\HyperWhale2.0\\Delta State\\2025 - Fall\\GIS-461\\FInal\\temps"
final_dir  <- "G:\\Other computers\\HyperWhale2.0\\Delta State\\2025 - Fall\\GIS-461\\FInal"
output_csv <- file.path(final_dir, "AZMET_Maricopa_Yearly_AirTMax_Stats_2002_2022.csv")

# Create directories if needed
if (!dir.exists(temps_dir)) dir.create(temps_dir, recursive = TRUE)
if (!dir.exists(final_dir)) dir.create(final_dir, recursive = TRUE)

# ------------------------------
# 3) Download station text files (automation)
# ------------------------------

# Raise the download timeout (R's default is 60 seconds)
options(timeout = 600)

for (yr in years) {
  yy        <- substr(as.character(yr), 3, 4)      # 2002 -> "02"
  file_name <- paste0(station_code, yy, "rd.txt")  # e.g., "0602rd.txt"
  file_url  <- paste0(base_url, "/", file_name)
  dest_path <- file.path(temps_dir, file_name)

  cat("Downloading:", file_url, "-->", dest_path, "\n")
  download.file(url = file_url, destfile = dest_path, mode = "wb")  # overwrites any existing file
}

cat("Files downloaded to:", temps_dir, "\n")
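# The loop above stops at the first failed download. A minimal sketch of a more
# forgiving alternative using base R's tryCatch(), so one bad year is skipped
# with a warning instead of aborting the run (same file-naming logic as above):
for (yr in years) {
  yy        <- substr(as.character(yr), 3, 4)
  file_name <- paste0(station_code, yy, "rd.txt")
  file_url  <- paste0(base_url, "/", file_name)
  dest_path <- file.path(temps_dir, file_name)

  ok <- tryCatch({
    download.file(url = file_url, destfile = dest_path, mode = "wb")
    TRUE
  }, error = function(e) {
    warning("Failed to download ", file_url, ": ", conditionMessage(e))
    FALSE
  })

  # Remove any partial file left behind by a failed download
  if (!ok && file.exists(dest_path)) file.remove(dest_path)
}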

# ------------------------------
# 4) Read & summarize the station files
# ------------------------------

# Set working directory to the temps folder
setwd(temps_dir)

# List the station files downloaded from AZMET
files <- list.files(
  path = ".",
  pattern = "^06[0-9]{2}rd\\.txt$",  # only Maricopa (06) files with the rd suffix
  full.names = TRUE
)

if (length(files) == 0) {
  stop("No Maricopa (06) rd station files found in the temps directory.")
}

print(files)
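# The year-naming step below assumes exactly one file per year. A minimal sanity
# check so a missing or extra file fails loudly rather than misaligning the names:
if (length(files) != length(years)) {
  stop("Expected ", length(years), " station files but found ", length(files))
}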

# Name the columns, since the downloaded .txt files have no headers
columns <- c(
  'Year', 'Day', 'Hour', 'AirT_Max', 'AirT_Min', 'AirT_Mean',
  'RH_Max', 'RH_Min', 'RH_Mean', 'VPD_Mean', 'SolarRad', 'Precip',
  '4ST_Max', '4ST_Min', '4ST_Mean', '20ST_Max', '20ST_Min', '20ST_Mean',
  'WS_Mean', 'WS_VectorM', 'WS_VectorD', 'WDirec_SD', 'MaxWS',
  'HeatUnit', 'ReferEvap'
)

# Read all files into a list and apply the column names
data_list <- lapply(files, function(f) {
  df <- read.table(f, sep = ",", header = FALSE)
  colnames(df) <- columns
  df
})
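# If AZMET ever changes the field layout, colnames(df) <- columns fails with a
# terse length error. A sketch of the same reader with an explicit column-count
# check for a clearer message (a drop-in alternative to the lapply() above):
data_list <- lapply(files, function(f) {
  df <- read.table(f, sep = ",", header = FALSE)
  if (ncol(df) != length(columns)) {
    stop("Unexpected column count in ", f, ": found ", ncol(df),
         ", expected ", length(columns))
  }
  colnames(df) <- columns
  df
})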

# Assign names based on the year in each file (years was already defined above);
# list.files() returns the files alphabetically, which matches ascending years here
names(data_list) <- years
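# The assignment above relies on list.files() returning the files in ascending
# year order. A sketch that instead recovers each year from the filename itself
# ("06YYrd.txt" -> 20YY), so the mapping cannot silently misalign (assumes all
# files are from the 2000s, which holds for 2002-2022):
file_years <- as.integer(paste0("20", substr(basename(files), 3, 4)))
names(data_list) <- file_years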

# Calculate the mean and SD of the daily maximum air temperature for each year
year_mean <- numeric(length(years))
year_sd   <- numeric(length(years))

for (i in seq_along(years)) {
  df <- data_list[[i]]

  # Extract daily high temps
  Tmax <- df$AirT_Max

  # Compute stats
  year_mean[i] <- mean(Tmax, na.rm = TRUE)
  year_sd[i]   <- sd(Tmax, na.rm = TRUE)
}

# Create the summary data frame
results <- data.frame(
  Year = years,
  Avg_Daily_High_Temp = round(year_mean, 2),
  SD_Daily_High_Temp  = round(year_sd, 2)
)

print(results)
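# Since dplyr is already loaded, the same summary can be written by stacking the
# yearly tables and grouping by Year. A sketch equivalent to the loop above,
# assuming the Year column in the raw files holds four-digit years (the loop
# version sidesteps that by indexing on the list positions instead):
results_dplyr <- bind_rows(data_list) %>%
  group_by(Year) %>%
  summarise(
    Avg_Daily_High_Temp = round(mean(AirT_Max, na.rm = TRUE), 2),
    SD_Daily_High_Temp  = round(sd(AirT_Max, na.rm = TRUE), 2),
    .groups = "drop"
  )
print(results_dplyr)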

# ------------------------------
# 5) Export results to CSV
# ------------------------------

write.csv(results, output_csv, row.names = FALSE)

cat("Stats CSV written to:", output_csv, "\n")