Load data
library(RPostgreSQL)
## Loading required package: DBI
library(rpart)
library(reshape2)
library(zoo)
## Warning: package 'zoo' was built under R version 3.4.3
##
## Attaching package: 'zoo'
## The following objects are masked from 'package:base':
##
## as.Date, as.Date.numeric
library(ggplot2)
library(MASS)
library(gridExtra)
## Warning: package 'gridExtra' was built under R version 3.4.3
# Set working directory
setwd("c:\\Users\\Hammo\\Documents\\smoke emissions project\\code_final")
# Database login details (not in GitHub repo)
load("access.Rda")
# Connect to database
p <- dbDriver("PostgreSQL")
con <- dbConnect(p,
                 user=access$user,
                 password=access$pwd,
                 host='penap-data.dyndns.org',
                 dbname='cona',
                 port=5432)
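# Before writing a query, DBI's generic helpers can be used to explore the
# connection (a sketch, not part of the original analysis; the tables listed
# depend on the database search path):
dbListTables(con)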
# Load ECan data (stored locally)
ecan <- read.csv('RangioraWinter2016.csv',stringsAsFactors=FALSE)
names(ecan) <- c('date','time','wind.speed','wind.dir','wind.dir.std','wind.speed.std',
                 'wind.max','co','temp.ground','temp.2m','temp.6m','pm10',
                 'pm2.5','pm.course')
# Note: 'Etc/GMT-12' is UTC+12 (POSIX zone signs are inverted), i.e. NZST with no daylight saving
ecan$date <- as.POSIXct(ecan$date,format = '%m/%d/%Y %H:%M', tz='Etc/GMT-12')
ecan$time <- NULL # this is included in date
# ECan site has siteid=18
odin.raw <- dbGetQuery(con,"SELECT d.recordtime AT TIME ZONE 'NZST' AS date,
                              i.serialn as serial, s.name as label,
                              d.value::numeric as val
                            FROM data.fixed_data as d,
                              admin.sensor as s, admin.instrument as i
                            WHERE s.id = d.sensorid
                              AND s.instrumentid = i.id
                              AND i.name = 'ODIN-SD-3'
                              AND d.siteid = 18
                              AND (i.serialn = 'ODIN-109')
                              AND (d.recordtime BETWEEN '2016-01-01 00:00 NZST'
                                                    AND '2017-01-01 00:00 NZST')
                              AND NOT (d.recordtime BETWEEN '2016-09-25 02:00 NZST'
                                                        AND '2016-09-25 03:00 NZST')
                              AND (s.name = 'PM2.5'
                                   OR s.name = 'Temperature'
                                   OR s.name = 'RH')
                            ORDER BY date;")
# Records between 02:00 and 03:00 on 2016-09-25 are excluded because this is when
# the clocks switch from NZST to NZDT (the start of daylight saving).
# Without this exclusion clause the returned timestamps are truncated to whole days.
# Truncate seconds from timestamps
odin.raw$date <- trunc(odin.raw$date,'min')
odin.raw$date <- as.POSIXct(odin.raw$date) # trunc() returns POSIXlt; convert back to POSIXct
# Fix daylight saving bug: add one hour to every record after the changeover
ds.time <- as.POSIXct('09/25/2016 02:00',format = '%m/%d/%Y %H:%M', tz='Etc/GMT-12')
odin.raw[odin.raw$date > ds.time, 'date'] <-
  odin.raw[odin.raw$date > ds.time, 'date'] + 60*60
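# Sanity check (a sketch using the objects defined above): after the one-hour
# shift there should still be at most one record per date, serial and sensor.
stopifnot(!any(duplicated(odin.raw[,c("date","serial","label")])))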
Preprocessing
odin <- dcast(odin.raw, date~label, value.var='val', fun.aggregate=mean)
# Make column names easier to type
names(odin) <- tolower(names(odin))
names(odin)[which(names(odin)=="temperature")] <- "temp"
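# For readers unfamiliar with dcast, a toy illustration of the long-to-wide
# reshape performed above (illustrative only, not part of the analysis):
toy <- data.frame(date  = rep(1:2, each = 3),
                  label = rep(c("PM2.5","Temperature","RH"), 2),
                  val   = 1:6)
dcast(toy, date ~ label, value.var = "val", fun.aggregate = mean)
# -> one row per date, with one column per label (PM2.5, RH, Temperature)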
# Start the combined dataset from the ECan records; ODIN columns are merged in below
data <- ecan
# Make sure that there is an ODIN entry for each minute
dates <- data.frame(date=seq(odin$date[1],odin$date[nrow(odin)],by='mins'))
odin <- merge(odin,dates,by="date",all=TRUE)
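# Quick check (sketch): the merged series should now be a complete, regular
# 1-minute sequence, with NA rows wherever the ODIN reported nothing.
stopifnot(all(diff(as.numeric(odin$date)) == 60))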
# Convert the ODIN measurements from 1-minute averages to rolling 1-hour averages
odin.zoo <- zoo( odin[,2:ncol(odin)] )
odin.roll.apply <- rollapply(odin.zoo,width=60,by=1,FUN=mean,align="left",na.rm=TRUE)
odin.avgs <- odin[1:nrow(odin.roll.apply),] # rollapply drops the final 59 incomplete windows
odin.avgs[,2:ncol(odin)] <- odin.roll.apply
# Timestamp each average at the end of its hour, following the ECan convention
odin.avgs$date <- odin.avgs$date + 60*60
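# The merge with the hourly ECan data below keeps only on-the-hour timestamps,
# so the rolling mean above amounts to a clock-hour average. A more direct,
# effectively equivalent aggregation (a sketch using zoo, already loaded, and
# the column names produced by dcast above):
odin.z <- zoo(odin[,c("pm2.5","rh","temp")], order.by=odin$date)
hour.end <- as.POSIXct(trunc(time(odin.z),"hours")) + 60*60
odin.hourly <- aggregate(odin.z, by=hour.end, FUN=mean, na.rm=TRUE)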
# Purge NaNs
odin.avgs <- odin.avgs[!is.nan(odin.avgs$pm2.5),]
odin.avgs <- odin.avgs[!is.nan(odin.avgs$rh),]
odin.avgs <- odin.avgs[!is.nan(odin.avgs$temp),]
names(odin.avgs) <- c('date',paste0(names(odin.avgs)[2:ncol(odin.avgs)],'.odin.109'))
# Inner join: keep only the hours present in both the ECan and ODIN data
data <- merge(data,odin.avgs,by='date',all=FALSE)
Testing Modeling Techniques
# Split the data into three contiguous blocks for 3-fold cross-validation
third <- as.integer(nrow(data)/3)
data.1 <- data[1:third,]
data.2 <- data[(third+1):(third*2),]
data.3 <- data[(third*2+1):nrow(data),]
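# The blocks are contiguous in time rather than random, so temporally
# autocorrelated hours stay within a single block. Their date ranges (sketch):
lapply(list(data.1=data.1,data.2=data.2,data.3=data.3), function(d) range(d$date))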
models <- list(NA,NA,NA,NA)
names(models) <- c("linear","quadratic","cubic","reg tree") # "robust linear",
# Table of mean squared errors (one row per test block, plus their average)
mse <- array(NA,dim=c(4,length(models)),
             dimnames=list(c('data.1','data.2','data.3','avg'),names(models)))
# Axis limits
var.x.mins <- c(pm2.5=0,temp=0,rh=25)
var.x.maxs <- c(pm2.5=100,temp=35,rh=100)
var.y.min <- -10
var.y.max <- 175
for (j in 1:3) {
  # Linear formula
  in.vars <- paste0(c('pm2.5','temp','rh'),'.odin.109')
  form <- as.formula(paste0("pm2.5 ~ ",paste0(in.vars,collapse='+')))
  # Quadratic formula
  quad.in.vars <- paste0("poly(",in.vars,",2)")
  quad.form <- as.formula(paste0("pm2.5 ~ ",paste0(quad.in.vars,collapse='+')))
  # Cubic formula
  cub.in.vars <- paste0("poly(",in.vars,",3)")
  cub.form <- as.formula(paste0("pm2.5 ~ ",paste0(cub.in.vars,collapse='+')))
  # Test on one block, train on the other two
  test.data <- get(paste0("data.",j))
  train.data <- rbind(get(paste0("data.",1+j%%3)),get(paste0("data.",1+(j+1)%%3)))
  # Linear model
  models[['linear']] <- lm(form,data=train.data)
  y.hat <- predict(models[['linear']],test.data)
  mse[j,'linear'] <- mean((test.data$pm2.5-y.hat)^2,na.rm=TRUE)
  # # Robust linear model
  # models[['robust linear']] <- rlm(form,data=train.data)
  # y.hat <- predict(models[['robust linear']],test.data)
  # mse[j,'robust linear'] <- mean((test.data$pm2.5-y.hat)^2,na.rm=TRUE)
  # Quadratic model
  models[['quadratic']] <- lm(quad.form,data=train.data)
  y.hat <- predict(models[['quadratic']],test.data)
  mse[j,'quadratic'] <- mean((test.data$pm2.5-y.hat)^2,na.rm=TRUE)
  # Cubic model
  models[['cubic']] <- lm(cub.form,data=train.data)
  y.hat <- predict(models[['cubic']],test.data)
  mse[j,'cubic'] <- mean((test.data$pm2.5-y.hat)^2,na.rm=TRUE)
  # Regression Tree
  models[['reg tree']] <- rpart(form,data=train.data,method='anova')
  y.hat <- predict(models[['reg tree']],test.data,'vector')
  mse[j,'reg tree'] <- mean((test.data$pm2.5-y.hat)^2,na.rm=TRUE)
  # Plot the fitted models: each curve varies one ODIN input across its axis
  # range while the other two inputs are held at their training-set medians.
  medians <- data.frame()
  for (i in 1:3) {
    medians[1:100,in.vars[i]] <- rep(median(train.data[,in.vars[i]]),100)
  }
  # Axis labels
  xlabs <- list(pm2.5=expression("ODIN-109"~PM[2.5]~(mu*"g"~m^-3)),
                temp=expression("ODIN-109 Temperature ("*degree*"C)"),
                rh=expression("ODIN-109 RH (%)"))
  # Plot titles
  mod.titles <- c("Linear","Quadratic","Cubic","Regression Tree")
  # Plots
  print(paste0("Data.",j,":"))
  for (i in 1:3) {
    plots <- vector("list",length(models))
    for (k in 1:length(models)) {
      df <- medians
      df[,in.vars[i]] <- seq(var.x.mins[i],var.x.maxs[i],length.out=100)
      line.df <- data.frame(x=df[,in.vars[i]],y=predict(models[[k]],df))
      point.df <- data.frame(x=train.data[,in.vars[i]],y=train.data[,"pm2.5"])
      plots[[k]] <- ggplot() +
        geom_point(data=point.df,aes(x,y),alpha=0.1) +
        geom_line(data=line.df,aes(x,y),colour="red") +
        xlim(var.x.mins[i],var.x.maxs[i]) +
        ylim(var.y.min,var.y.max) +
        xlab(xlabs[[i]]) +
        ylab(expression("TEOM"~PM[2.5]~(mu*'g'~m^-3))) +
        ggtitle(mod.titles[k])
    }
    grid.arrange(grobs=plots,ncol=2)
  }
  # Average MSE across the three test blocks (recomputed each pass; complete after j=3)
  for (i in 1:length(models)) {
    mse['avg',i] <- mean(mse[,i],na.rm=TRUE)
  }
}
## [1] "Data.1:"
## Warning: Removed 136 rows containing missing values (geom_point).
## Warning: Removed 136 rows containing missing values (geom_point).
## Warning: Removed 136 rows containing missing values (geom_point).
## Warning: Removed 136 rows containing missing values (geom_point).

## Warning: Removed 227 rows containing missing values (geom_point).
## Warning: Removed 227 rows containing missing values (geom_point).
## Warning: Removed 227 rows containing missing values (geom_point).
## Warning: Removed 227 rows containing missing values (geom_point).

## Warning: Removed 260 rows containing missing values (geom_point).
## Warning: Removed 260 rows containing missing values (geom_point).
## Warning: Removed 260 rows containing missing values (geom_point).
## Warning: Removed 260 rows containing missing values (geom_point).

## [1] "Data.2:"
## Warning: Removed 147 rows containing missing values (geom_point).
## Warning: Removed 147 rows containing missing values (geom_point).
## Warning: Removed 147 rows containing missing values (geom_point).
## Warning: Removed 147 rows containing missing values (geom_point).

## Warning: Removed 696 rows containing missing values (geom_point).
## Warning: Removed 696 rows containing missing values (geom_point).
## Warning: Removed 696 rows containing missing values (geom_point).
## Warning: Removed 696 rows containing missing values (geom_point).

## Warning: Removed 240 rows containing missing values (geom_point).
## Warning: Removed 240 rows containing missing values (geom_point).
## Warning: Removed 240 rows containing missing values (geom_point).
## Warning: Removed 240 rows containing missing values (geom_point).

## [1] "Data.3:"
## Warning: Removed 219 rows containing missing values (geom_point).
## Warning: Removed 219 rows containing missing values (geom_point).
## Warning: Removed 219 rows containing missing values (geom_point).
## Warning: Removed 219 rows containing missing values (geom_point).

## Warning: Removed 839 rows containing missing values (geom_point).
## Warning: Removed 839 rows containing missing values (geom_point).
## Warning: Removed 839 rows containing missing values (geom_point).
## Warning: Removed 839 rows containing missing values (geom_point).

## Warning: Removed 252 rows containing missing values (geom_point).
## Warning: Removed 252 rows containing missing values (geom_point).
## Warning: Removed 1 rows containing missing values (geom_path).
## Warning: Removed 252 rows containing missing values (geom_point).
## Warning: Removed 1 rows containing missing values (geom_path).
## Warning: Removed 252 rows containing missing values (geom_point).

mse <- round(mse,2)
mse
##        linear quadratic cubic reg tree
## data.1  58.62     57.92 55.60    81.65
## data.2  20.29     20.98 20.41    37.26
## data.3  12.27     11.90 11.38    11.43
## avg     34.25     34.10 32.81    49.30
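# The cubic model has the lowest average MSE (32.81) and the regression tree the
# highest (49.30); taking sqrt(mse) would give the RMSE in the same units as
# PM2.5 (ug m^-3). A quick ranking, plus a sketch of refitting the best form on
# the full dataset as a final calibration (not part of the original analysis):
sort(mse['avg',])
final.model <- lm(cub.form,data=data)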