Page 415 problem 15

Write a program to simulate the outcomes of a Markov chain after n steps, given the initial starting state and the transition matrix P as data (see Example 11.12). Keep this program for use in later problems.

# Compute the distribution of the chain after n_steps, given an initial
# distribution (row vector) and the transition matrix P, by repeatedly
# multiplying the current distribution by P.
markov_chain <- function(initial_state, transition_matrix, n_steps) {
  current_state <- initial_state
  # Iterate through the number of steps, updating the distribution each time
  for (i in 1:n_steps) {
    current_state <- current_state %*% transition_matrix
  }
  
  return(current_state)
}
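
The function above propagates the probability distribution of the state. If the exercise's "simulate the outcomes" is instead read as drawing one actual random trajectory of the chain, a minimal sketch is below (my addition, not from the text), assuming the states are labeled 1 through nrow(P) and using base R's sample(); the name simulate_path is illustrative.

# Sketch: sample one random path of the chain (states labeled 1..nrow(P)).
# simulate_path is a hypothetical helper name, not from the textbook.
simulate_path <- function(start, transition_matrix, n_steps) {
  n_states <- nrow(transition_matrix)
  path <- numeric(n_steps + 1)
  path[1] <- start
  for (i in 1:n_steps) {
    # Draw the next state from the row of P belonging to the current state
    path[i + 1] <- sample(n_states, size = 1,
                          prob = transition_matrix[path[i], ])
  }
  return(path)
}
# e.g., once transition_matrix below is defined:
# set.seed(1); simulate_path(1, transition_matrix, 10)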

# Example usage:
# Define the initial states (one indicator vector per starting state)
state1 <- c(1, 0, 0) # Starting from state 1 (Harvard)
state2 <- c(0, 1, 0) # Starting from state 2 (Yale)
state3 <- c(0, 0, 1) # Starting from state 3 (Dartmouth)
# Define the transition matrix P using Example 11.6 in the text (page 410)
transition_matrix <- matrix(c(
  0.8, 0.2, 0,
  0.3, 0.4, 0.3,
  0.2, 0.1, 0.7
), nrow = 3, byrow = TRUE)
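
As a quick sanity check (my addition, not required by the exercise), each row of a transition matrix must sum to 1, which base R's rowSums() confirms:

# Each row of P should sum to 1 for a valid stochastic matrix
rowSums(transition_matrix)  # expected: 1 1 1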

# Number of steps
n_steps_10 <- 10
n_steps_100 <- 100
# Simulate the Markov chain for 10 steps
final_state1_10 <- markov_chain(state1, transition_matrix, n_steps_10)
final_state2_10 <- markov_chain(state2, transition_matrix, n_steps_10)
final_state3_10 <- markov_chain(state3, transition_matrix, n_steps_10)

print(final_state1_10)
##           [,1]      [,2]    [,3]
## [1,] 0.5589736 0.2232564 0.21777
print(final_state2_10)
##           [,1]      [,2]      [,3]
## [1,] 0.5526546 0.2213458 0.2259996
print(final_state3_10)
##           [,1]      [,2]      [,3]
## [1,] 0.5499114 0.2205132 0.2295754
# Simulate the Markov chain for 100 steps
final_state1_100 <- markov_chain(state1, transition_matrix, n_steps_100)
final_state2_100 <- markov_chain(state2, transition_matrix, n_steps_100)
final_state3_100 <- markov_chain(state3, transition_matrix, n_steps_100)

print(final_state1_100)
##           [,1]      [,2]      [,3]
## [1,] 0.5555556 0.2222222 0.2222222
print(final_state2_100)
##           [,1]      [,2]      [,3]
## [1,] 0.5555556 0.2222222 0.2222222
print(final_state3_100)
##           [,1]      [,2]      [,3]
## [1,] 0.5555556 0.2222222 0.2222222
# Simulate Example 11.5 in the text (page 409)

transition_matrix_11_5 <- matrix(c(
  0.5, 0.25, 0.25,
  0.5, 0.25, 0.25,
  0.5, 0.25, 0.25
), nrow = 3, byrow = TRUE)

# Simulate with 10 steps

final_state_win_10 <- markov_chain(state1, transition_matrix_11_5, n_steps_10)
final_state_second_10 <- markov_chain(state2, transition_matrix_11_5, n_steps_10)
final_state_third_10 <- markov_chain(state3, transition_matrix_11_5, n_steps_10)

print(final_state_win_10)
##      [,1] [,2] [,3]
## [1,]  0.5 0.25 0.25
print(final_state_second_10)
##      [,1] [,2] [,3]
## [1,]  0.5 0.25 0.25
print(final_state_third_10)
##      [,1] [,2] [,3]
## [1,]  0.5 0.25 0.25
# Simulate with 100 steps
final_state_win_100 <- markov_chain(state1, transition_matrix_11_5, n_steps_100)
final_state_second_100 <- markov_chain(state2, transition_matrix_11_5, n_steps_100)
final_state_third_100 <- markov_chain(state3, transition_matrix_11_5, n_steps_100)

print(final_state_win_100)
##      [,1] [,2] [,3]
## [1,]  0.5 0.25 0.25
print(final_state_second_100)
##      [,1] [,2] [,3]
## [1,]  0.5 0.25 0.25
print(final_state_third_100)
##      [,1] [,2] [,3]
## [1,]  0.5 0.25 0.25

From these results we can see that in the first example, 10 steps is not enough for convergence: the distributions still differ slightly depending on the starting state. After 100 steps, however, every starting state yields the same distribution, approximately (0.556, 0.222, 0.222), so the starting state no longer matters.
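
As a cross-check (my addition, not part of the assigned problem), the common limiting row is the fixed vector of P, i.e. the left eigenvector for eigenvalue 1 normalized to sum to 1; since 1 is the eigenvalue of largest modulus here, eigen() lists it first:

# Left eigenvector of P for eigenvalue 1 gives the fixed (stationary) vector
w <- eigen(t(transition_matrix))$vectors[, 1]
stationary <- Re(w) / sum(Re(w))
print(stationary)
# expected: roughly 0.5555556 0.2222222 0.2222222, i.e. (5/9, 2/9, 2/9)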

In the second example, convergence is immediate: already at 10 steps the distribution is (0.5, 0.25, 0.25) for every starting state, so again the starting state does not matter in the long run.
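
The immediate convergence is expected: every row of the Example 11.5 matrix is identical, so a single step already produces the limiting distribution (0.5, 0.25, 0.25), and further steps change nothing. A quick check of this (my addition):

# With identical rows, P %*% P reproduces P exactly (P is idempotent)
print(transition_matrix_11_5 %*% transition_matrix_11_5)
# expected: the same matrix, each row (0.5, 0.25, 0.25)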