# JAGSExamples / scripts / normal / normal_mean.R
# Basic inference of a normal mean with known sigma and a vague dnorm(0, 0.0001) prior.
library('rjags')
## Loading required package: coda
## Linked to JAGS 3.4.0
## Loaded modules: basemod,bugs
setwd("~/new/Bayes/Beyes/rjags/rjags1")

df <- read.csv("normal_mean.csv")
head(df)
##          X
## 1 23.12064
## 2 25.55093
## 3 22.49311
## 4 29.78584
## 5 25.98852
## 6 22.53859
normal_mean_code <- '
  model
{
    for (i in 1:N)
    {
        x[i] ~ dnorm(mu, tau)
    }

    mu ~ dnorm(0, 0.0001)

    sigma <- 3
    tau <- pow(sigma, -2)
}
'
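
# Note that JAGS parameterises dnorm() by precision, not variance: tau = 1 / sigma^2.
# A quick sanity check in plain R (not part of the model) of what the vague prior
# dnorm(0, 0.0001) implies on the standard-deviation scale:
1 / sqrt(0.0001)
## [1] 100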

writeLines(normal_mean_code, con="normal_mean.txt")


jags <- jags.model("normal_mean.txt",
                   data = list('x' = with(df, X),
                               'N' = nrow(df)),
                   n.chains = 4,
                   n.adapt = 500)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
##    Graph Size: 108
## 
## Initializing model
mcmc.samples1 <- coda.samples(jags,
                             c('mu'),
                             5000)

# png(file.path('graphs', 'normal', 'plot1.png'))  # uncomment to write the plot to file
plot(mcmc.samples1)

dev.off()  # closes the current graphics device (the png device when the line above is enabled)
## null device 
##           1
summary(mcmc.samples1)
## 
## Iterations = 1:5000
## Thinning interval = 1 
## Number of chains = 4 
## Sample size per chain = 5000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##           Mean             SD       Naive SE Time-series SE 
##      25.325189       0.299984       0.002121       0.002132 
## 
## 2. Quantiles for each variable:
## 
##  2.5%   25%   50%   75% 97.5% 
## 24.74 25.12 25.32 25.53 25.92
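
# Since four chains were run, it is worth confirming that they agree before trusting
# the summary. A minimal check using coda's built-in diagnostics, applied to the
# mcmc.samples1 object produced above:
gelman.diag(mcmc.samples1)   # potential scale reduction factor; values near 1 indicate convergence
effectiveSize(mcmc.samples1) # effective sample size after accounting for autocorrelation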

# normal_mean_strong_broken_prior.R

# Inference with a strong, misspecified prior: dunif(0, 1) puts no mass near the sample mean.
library('rjags')

setwd("~/new/Bayes/Beyes/rjags/rjags1")

df <- read.csv("normal_mean_strong_broken_prior.csv")
head(df)
##          X
## 1 23.12064
## 2 25.55093
## 3 22.49311
## 4 29.78584
## 5 25.98852
## 6 22.53859
normal_mean_strong_broken_prior_code <- '
  model
{
    for (i in 1:N)
    {
        x[i] ~ dnorm(mu, tau)
    }
    
    mu ~ dunif(0, 1)
    
    sigma <- 3
    tau <- pow(sigma, -2)
}
'

writeLines(normal_mean_strong_broken_prior_code,
           con="normal_mean_strong_broken_prior.txt")

jags <- jags.model("normal_mean_strong_broken_prior.txt",
                   data = list('x' = with(df, X),
                               'N' = nrow(df)),
                   n.chains = 4,
                   n.adapt = 500)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
##    Graph Size: 108
## 
## Initializing model
mcmc.samples <- coda.samples(jags,
                             c('mu'),
                             1000)

#png(file.path('graphs', 'normal', 'plot4.png'))
plot(mcmc.samples)

dev.off()
## null device 
##           1
summary(mcmc.samples)
## 
## Iterations = 501:1500
## Thinning interval = 1 
## Number of chains = 4 
## Sample size per chain = 1000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##           Mean             SD       Naive SE Time-series SE 
##      9.964e-01      3.726e-03      5.891e-05      1.702e-04 
## 
## 2. Quantiles for each variable:
## 
##   2.5%    25%    50%    75%  97.5% 
## 0.9870 0.9950 0.9974 0.9989 0.9999
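
# The posterior piles up against the upper bound of the dunif(0, 1) prior because the
# prior assigns zero mass anywhere near the sample mean. A quick way to see the
# conflict (a sketch; assumes df and mcmc.samples from this run are still in scope):
mean(df$X)                    # the data point toward mu around 25
max(as.matrix(mcmc.samples))  # but every draw is capped below 1 by the prior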

# normal_mean_strong_prior.R

# Inference with a strong prior centered at zero: dnorm(0, 10).
library('rjags')

setwd("~/new/Bayes/Beyes/rjags/rjags1")

df <- read.csv("normal_mean_strong_prior.csv")
head(df)
##          X
## 1 23.12064
## 2 25.55093
## 3 22.49311
## 4 29.78584
## 5 25.98852
## 6 22.53859
normal_mean_strong_prior_code <- '
  model
{
    for (i in 1:N)
    {
        x[i] ~ dnorm(mu, tau)
    }

    mu ~ dnorm(0, 10)

    sigma <- 3
    tau <- pow(sigma, -2)
}
'

writeLines(normal_mean_strong_prior_code,
           con="normal_mean_strong_prior.txt")

jags <- jags.model("normal_mean_strong_prior.txt",
                   data = list('x' = with(df, X),
                               'N' = nrow(df)),
                   n.chains = 4,
                   n.adapt = 500)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
##    Graph Size: 108
## 
## Initializing model
mcmc.samples <- coda.samples(jags,
                             c('mu'),
                             5000)

#png(file.path('graphs', 'normal', 'plot2.png'))
plot(mcmc.samples)

dev.off()
## null device 
##           1
summary(mcmc.samples)
## 
## Iterations = 1:5000
## Thinning interval = 1 
## Number of chains = 4 
## Sample size per chain = 5000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##           Mean             SD       Naive SE Time-series SE 
##      13.328527       0.217860       0.001541       0.001525 
## 
## 2. Quantiles for each variable:
## 
##  2.5%   25%   50%   75% 97.5% 
## 12.90 13.18 13.33 13.47 13.76
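
# With sigma fixed, a normal prior on mu is conjugate, so the shrinkage toward the
# prior mean of 0 can be checked in closed form. A minimal sketch in plain R using
# the prior and sigma hard-coded in the model above (assumes df is still loaded;
# the variable names below are just for illustration):
prior.mean <- 0
prior.prec <- 10
tau        <- 1 / 3^2    # sigma <- 3 in the model, so tau = 1 / sigma^2
n          <- nrow(df)
xbar       <- mean(df$X)
post.prec  <- prior.prec + n * tau
post.mean  <- (prior.prec * prior.mean + n * tau * xbar) / post.prec
post.mean                # should land close to the MCMC posterior mean reported above
sqrt(1 / post.prec)      # analytic posterior standard deviation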

# normal_mean_strong_wrong_prior.R

# Inference with a strong prior centered far from the data: dnorm(-1000, 10).
library('rjags')

setwd("~/new/Bayes/Beyes/rjags/rjags1")

df <- read.csv("normal_mean_strong_wrong_prior.csv")
head(df)
##          X
## 1 23.12064
## 2 25.55093
## 3 22.49311
## 4 29.78584
## 5 25.98852
## 6 22.53859
normal_mean_strong_wrong_prior_code <- '
  model
{
    for (i in 1:N)
    {
        x[i] ~ dnorm(mu, tau)
    }

    mu ~ dnorm(-1000, 10)

    sigma <- 3
    tau <- pow(sigma, -2)
}
'

writeLines(normal_mean_strong_wrong_prior_code,
           con="normal_mean_strong_wrong_prior.txt")

jags <- jags.model("normal_mean_strong_wrong_prior.txt",
                   data = list('x' = with(df, X),
                               'N' = nrow(df)),
                   n.chains = 4,
                   n.adapt = 500)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
##    Graph Size: 109
## 
## Initializing model
mcmc.samples <- coda.samples(jags,
                             c('mu'),
                             10000)

#png(file.path('graphs', 'normal', 'plot3.png'))
plot(mcmc.samples)

dev.off()
## null device 
##           1
summary(mcmc.samples)
## 
## Iterations = 1:10000
## Thinning interval = 1 
## Number of chains = 4 
## Sample size per chain = 10000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##           Mean             SD       Naive SE Time-series SE 
##     -4.604e+02      2.177e-01      1.088e-03      1.088e-03 
## 
## 2. Quantiles for each variable:
## 
##   2.5%    25%    50%    75%  97.5% 
## -460.8 -460.5 -460.4 -460.2 -459.9
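
# The dnorm(-1000, 10) prior is strong enough that the posterior mean (about -460)
# sits far below every observed value: it is the precision-weighted compromise
# between the prior mean of -1000 and the sample mean. A quick comparison
# (a sketch; assumes df and mcmc.samples from this run are still in scope):
c(sample.mean    = mean(df$X),
  posterior.mean = mean(as.matrix(mcmc.samples)[, "mu"]))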