For Question 1 using an M/M/1 queue:
c_names <- c("queue_type","dist_type", "lambda", "mu", "rho", "L", "W", "Wq", "Lq")
comp <- data.frame(matrix(NA, nrow = 4, ncol = length(c_names)))
names(comp) <- c_names
mean_interval_time = 1.25   # mean interarrival time (minutes)
mean_service_time = 1       # mean service time (minutes)
comp$queue_type[1] <- "M/M/1"
comp$dist_type[1] <- "Exponential"
comp$lambda[1] <- 1/mean_interval_time
comp$mu[1] <- 1/mean_service_time
comp$rho[1] <- comp$lambda[1]/comp$mu[1]
comp$L[1] <- comp$rho[1]/(1-comp$rho[1])
comp$W[1] <- comp$L[1]/comp$lambda[1]
comp$Wq[1] <- comp$W[1]-mean_service_time
comp$Lq[1] <- comp$lambda[1]*comp$Wq[1]
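For reference, these assignments apply the standard M/M/1 steady-state relations (with Little's law connecting the numbers and the times): \[\rho = \frac{\lambda}{\mu}, \qquad L = \frac{\rho}{1-\rho}, \qquad W = \frac{L}{\lambda}, \qquad W_q = W - \frac{1}{\mu}, \qquad L_q = \lambda W_q\]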
Question 2 asks for the service times to be uniformly distributed (rather than exponentially, as in Question 1). This calls for the M/G/1 model, which needs the variance \(\sigma^2\) of the service-time distribution. For a uniform distribution on \([a, b]\): \[E[S] = \frac{a+b}{2}\] \[\sigma^2 = \frac{1}{b-a}\int_{a}^{b} \left ( x - E[S] \right )^2dx = \frac{\left ( E[S]-a \right )^3 - \left ( E[S]-b \right )^3}{3\left ( b-a \right )}=\frac{\left ( b-a \right )^2}{12}\] The steady-state time in queue then follows from the Pollaczek–Khinchine formula (with \(\mu\) the service rate): \[W_q=\frac{\lambda\left(\sigma^2+\frac{1}{\mu^2}\right)}{2\left(1-\frac{\lambda}{\mu}\right)}\]
a <- 0.1
b <- 1.9
variance <- (b-a)^2/12
comp$queue_type[2] <- "M/G/1"
comp$dist_type[2] <- "Uniform"
comp$lambda[2] <- 1/mean_interval_time
comp$mu[2] <- 1/mean_service_time
comp$rho[2] <- comp$lambda[2]/comp$mu[2]
comp$Wq[2] <- comp$lambda[2]*(variance+1/(comp$mu[2])^2)/
  (2*(1-comp$lambda[2]/comp$mu[2]))
comp$W[2] <- comp$Wq[2]+mean_service_time
comp$L[2] <- comp$lambda[2]*comp$W[2]
comp$Lq[2] <- comp$lambda[2]*comp$Wq[2]
comp[1:2,]
## queue_type dist_type lambda mu rho L W Wq Lq
## 1 M/M/1 Exponential 0.8 1 0.8 4.000 5.00 4.00 3.200
## 2 M/G/1 Uniform 0.8 1 0.8 2.832 3.54 2.54 2.032
Comparing the two models, the M/G/1 results are lower for both the times in system/queue and the numbers in system/queue. The difference comes from the service-time distribution. By the Pollaczek–Khinchine formula, \(W_q\) grows with the service-time variance, and the exponential distribution (variance \(1/\mu^2 = 1\)) is far more variable than the uniform distribution on \([0.1, 1.9]\) (variance \(0.27\)): although the exponential produces a higher frequency of short service times, it is unbounded and occasionally produces very long ones, which inflate the waits. The bounded uniform distribution therefore yields lower times and numbers.
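As a quick sanity check (a sketch, not part of the original solution; the function name sim_wq, the seed, and the sample size are arbitrary choices), a single-server FIFO queue can be simulated with the Lindley recursion, and the estimated \(W_q\) should come out near the analytic values above:
set.seed(1)
sim_wq <- function(rservice, n = 2e5, lambda = 0.8) {
  inter <- rexp(n, rate = lambda)   # exponential interarrival times
  serv <- rservice(n)               # service times from the chosen distribution
  wq <- numeric(n)
  for (i in 2:n) wq[i] <- max(0, wq[i - 1] + serv[i - 1] - inter[i])
  mean(wq)                          # estimate of the steady-state Wq
}
sim_wq(function(n) rexp(n, rate = 1))    # M/M/1: roughly 4
sim_wq(function(n) runif(n, 0.1, 1.9))   # M/G/1 with uniform service: roughly 2.54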
For a triangular service-time distribution with minimum \(a\), mode \(m\), and maximum \(b\), the variance \(\sigma^2\) is:
\[\sigma^2 = \frac{a^2+m^2+b^2-am-ab-bm}{18}\]
mode_3 <- 1
variance <- (a^2+mode_3^2+b^2-a*mode_3-a*b-b*mode_3)/18
comp$queue_type[3] <- "M/G/1"
comp$dist_type[3] <- "Triangular"
comp$lambda[3] <- 1/mean_interval_time
comp$mu[3] <- 1/mean_service_time
comp$rho[3] <- comp$lambda[3]/comp$mu[3]
comp$Wq[3] <- comp$lambda[3]*(variance+1/(comp$mu[3])^2)/
  (2*(1-comp$lambda[3]/comp$mu[3]))
comp$W[3] <- comp$Wq[3]+mean_service_time
comp$L[3] <- comp$lambda[3]*comp$W[3]
comp$Lq[3] <- comp$lambda[3]*comp$Wq[3]
comp[1:3,]
## queue_type dist_type lambda mu rho L W Wq Lq
## 1 M/M/1 Exponential 0.8 1 0.8 4.000 5.00 4.00 3.200
## 2 M/G/1 Uniform 0.8 1 0.8 2.832 3.54 2.54 2.032
## 3 M/G/1 Triangular 0.8 1 0.8 2.616 3.27 2.27 1.816
Like the uniform distribution, the triangular distribution is bounded, which keeps its average times and numbers below those of the exponential case. They are also lower than the uniform case: with mode 1, the triangular distribution concentrates probability near its mean, so service times far above or below the mean are less likely than under the uniform distribution. The net effect is a smaller variance (0.135 versus 0.27), and by the Pollaczek–Khinchine formula a smaller service-time variance means a smaller \(W_q\), which slightly deflates all the averages, as the output shows.
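As an optional check (a sketch; the inverse-CDF sampler rtri below is not part of the original code), the empirical variance of a large Triangular(0.1, 1.9, mode = 1) sample should land near the analytic 0.135:
rtri <- function(n, a, b, m) {
  u <- runif(n)
  ifelse(u < (m - a)/(b - a),
         a + sqrt(u*(b - a)*(m - a)),          # left side of the triangle
         b - sqrt((1 - u)*(b - a)*(b - m)))    # right side of the triangle
}
var(rtri(1e6, a = 0.1, b = 1.9, m = 1))   # should be close to 0.135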
Since the number of servers has changed to 3 and the mean service time has increased from 1 minute to 3 minutes, the steady-state average number in the queue \(L_q\) will change and, therefore, so will all the other measures.
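The code below applies the standard M/M/c results for the probability of an empty system and the mean queue length, with \(\rho = \lambda/(c\mu)\): \[p_0 = \left[\sum_{n=0}^{c-1}\frac{(c\rho)^n}{n!} + \frac{(c\rho)^c}{c!\,(1-\rho)}\right]^{-1}, \qquad L_q = \frac{\rho\,(c\rho)^c\,p_0}{c!\,(1-\rho)^2}, \qquad L = L_q + \frac{\lambda}{\mu}, \qquad W_q = \frac{L_q}{\lambda}\]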
mean_service_time = 3
c_var = 3
n_list <- rep(0:(c_var-1),1)
comp$queue_type[4] <- "M/M/3"
comp$dist_type[4] <- "Exponential"
comp$lambda[4] <- 1/mean_interval_time
comp$mu[4] <- 1/mean_service_time
comp$rho[4] <- comp$lambda[4]/(c_var*comp$mu[4])
p_0 <- 1/((c_var*comp$rho[4])^c_var/(factorial(c_var)*(1-comp$rho[4])) +
sum((c_var*comp$rho[4])^n_list/factorial(n_list)))
comp$Lq[4] <- ((comp$rho[4]*(c_var*comp$rho[4])^c_var)*p_0)/
(factorial(c_var)*(1-comp$rho[4])^2)
comp$L[4] <- comp$Lq[4] + (comp$lambda[4]/comp$mu[4])
comp$Wq[4] <- comp$Lq[4]/comp$lambda[4]
comp$W[4] <- comp$Wq[4]+mean_service_time
comp[1:4,]
## queue_type dist_type lambda mu rho L W Wq
## 1 M/M/1 Exponential 0.8 1.0000000 0.8 4.000000 5.000000 4.000000
## 2 M/G/1 Uniform 0.8 1.0000000 0.8 2.832000 3.540000 2.540000
## 3 M/G/1 Triangular 0.8 1.0000000 0.8 2.616000 3.270000 2.270000
## 4 M/M/3 Exponential 0.8 0.3333333 0.8 4.988764 6.235955 3.235955
## Lq
## 1 3.200000
## 2 2.032000
## 3 1.816000
## 4 2.588764
First we calculate all the variables using the same process as in Problem 5. Since all interarrival and service times are exponentially distributed, the M/M/c model can be used throughout; with c = 1, the M/M/c model reduces to the M/M/1 model. The values in the table below are per station (node), not averages summed over the entire system.
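Since the same M/M/c calculation is repeated for every station, a small helper (a sketch with the hypothetical name mmc_metrics, not part of the original code) could replace the per-station blocks that follow:
mmc_metrics <- function(lambda, service, servers) {
  mu  <- 1/service
  rho <- lambda/(servers*mu)
  n   <- 0:(servers - 1)
  p0  <- 1/((servers*rho)^servers/(factorial(servers)*(1 - rho)) +
            sum((servers*rho)^n/factorial(n)))
  Lq  <- (rho*(servers*rho)^servers*p0)/(factorial(servers)*(1 - rho)^2)
  Wq  <- Lq/lambda
  c(lambda = lambda, mu = mu, rho = rho,
    L = Lq + lambda/mu, W = Wq + service, Wq = Wq, Lq = Lq)
}
mmc_metrics(lambda = 1/6, service = 3, servers = 2)   # reproduces the Sign In row below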
clinic_col <- c("Station", "lambda", "mu", "rho", "L", "W", "Wq", "Lq")
station_row <- c("Sign In", "Registration", "Trauma Rooms", "Exam Rooms", "Treatment Rooms")
clinic <- data.frame(matrix(NA, nrow = length(station_row), ncol = length(clinic_col)))
colnames(clinic) <- clinic_col
clinic[,1] <- station_row
# Station A: Sign In (2 servers, mean service time 3 minutes; one arrival every 6 minutes on average)
c_A = 2
service_A <- 3
n_A <- rep(0:(c_A-1),1)
clinic$lambda[1] <- 1/6
clinic$mu[1] <- 1/service_A
clinic$rho[1] <- clinic$lambda[1]/(c_A*clinic$mu[1])
p_0_A <- 1/((c_A*clinic$rho[1])^c_A/(factorial(c_A)*(1-clinic$rho[1])) +
sum((c_A*clinic$rho[1])^n_A/factorial(n_A)))
clinic$Lq[1] <- ((clinic$rho[1]*(c_A*clinic$rho[1])^c_A)*p_0_A)/
(factorial(c_A)*(1-clinic$rho[1])^2)
clinic$L[1] <- clinic$Lq[1] + (clinic$lambda[1]/clinic$mu[1])
clinic$Wq[1] <- clinic$Lq[1]/clinic$lambda[1]
clinic$W[1] <- clinic$Wq[1]+service_A
# Station B: Registration (1 server, mean service time 5 minutes); receives 90% of Sign In patients
c_B = 1
service_B <- 5
n_B <- rep(0:(c_B-1),1)
clinic$lambda[2] <- (0.9*clinic$lambda[1])
clinic$mu[2] <- 1/service_B
clinic$rho[2] <- clinic$lambda[2]/(c_B*clinic$mu[2])
p_0_B <- 1/((c_B*clinic$rho[2])^c_B/(factorial(c_B)*(1-clinic$rho[2])) +
sum((c_B*clinic$rho[2])^n_B/factorial(n_B)))
clinic$Lq[2] <- ((clinic$rho[2]*(c_B*clinic$rho[2])^c_B)*p_0_B)/
(factorial(c_B)*(1-clinic$rho[2])^2)
clinic$L[2] <- clinic$Lq[2] + (clinic$lambda[2]/clinic$mu[2])
clinic$Wq[2] <- clinic$Lq[2]/clinic$lambda[2]
clinic$W[2] <- clinic$Wq[2]+service_B
# Station C: Trauma Rooms (2 servers, mean service time 90 minutes); receives 10% of Sign In patients
c_C = 2
service_C <- 90
n_C <- rep(0:(c_C-1),1)
clinic$lambda[3] <- (0.1*clinic$lambda[1])
clinic$mu[3] <- 1/service_C
clinic$rho[3] <- clinic$lambda[3]/(c_C*clinic$mu[3])
p_0_C <- 1/((c_C*clinic$rho[3])^c_C/(factorial(c_C)*(1-clinic$rho[3])) +
  sum((c_C*clinic$rho[3])^n_C/factorial(n_C)))
clinic$Lq[3] <- ((clinic$rho[3]*(c_C*clinic$rho[3])^c_C)*p_0_C)/
  (factorial(c_C)*(1-clinic$rho[3])^2)
clinic$L[3] <- clinic$Lq[3] + (clinic$lambda[3]/clinic$mu[3])
clinic$Wq[3] <- clinic$Lq[3]/clinic$lambda[3]
clinic$W[3] <- clinic$Wq[3]+service_C
# Station D: Exam Rooms (3 servers, mean service time 16 minutes); receives all Registration patients
c_D = 3
service_D <- 16
n_D <- rep(0:(c_D-1),1)
clinic$lambda[4] <- (0.9*clinic$lambda[1])
clinic$mu[4] <- 1/service_D
clinic$rho[4] <- clinic$lambda[4]/(c_D*clinic$mu[4])
p_0_D <- 1/((c_D*clinic$rho[4])^c_D/(factorial(c_D)*(1-clinic$rho[4])) +
  sum((c_D*clinic$rho[4])^n_D/factorial(n_D)))
clinic$Lq[4] <- ((clinic$rho[4]*(c_D*clinic$rho[4])^c_D)*p_0_D)/
  (factorial(c_D)*(1-clinic$rho[4])^2)
clinic$L[4] <- clinic$Lq[4] + (clinic$lambda[4]/clinic$mu[4])
clinic$Wq[4] <- clinic$Lq[4]/clinic$lambda[4]
clinic$W[4] <- clinic$Wq[4]+service_D
# Station E: Treatment Rooms (2 servers, mean service time 15 minutes); receives 60% of Exam Room patients plus all Trauma patients
c_E = 2
service_E <- 15
n_E <- rep(0:(c_E-1),1)
clinic$lambda[5] <- (0.9*0.6*clinic$lambda[1]) +0.1*clinic$lambda[1]
clinic$mu[5] <- 1/service_E
clinic$rho[5] <- clinic$lambda[5]/(c_E*clinic$mu[5])
p_0_E <- 1/((c_E*clinic$rho[5])^c_E/(factorial(c_E)*(1-clinic$rho[5])) +
  sum((c_E*clinic$rho[5])^n_E/factorial(n_E)))
clinic$Lq[5] <- ((clinic$rho[5]*(c_E*clinic$rho[5])^c_E)*p_0_E)/
  (factorial(c_E)*(1-clinic$rho[5])^2)
clinic$L[5] <- clinic$Lq[5] + (clinic$lambda[5]/clinic$mu[5])
clinic$Wq[5] <- clinic$Lq[5]/clinic$lambda[5]
clinic$W[5] <- clinic$Wq[5]+service_E
clinic
## Station lambda mu rho L W Wq
## 1 Sign In 0.16666667 0.33333333 0.25 0.5333333 3.20000 0.20000
## 2 Registration 0.15000000 0.20000000 0.75 3.0000000 20.00000 15.00000
## 3 Trauma Rooms 0.01666667 0.01111111 0.75 3.4285714 205.71429 115.71429
## 4 Exam Rooms 0.15000000 0.06250000 0.80 4.9887640 33.25843 17.25843
## 5 Treatment Rooms 0.10666667 0.06666667 0.80 4.4444444 41.66667 26.66667
## Lq
## 1 0.03333333
## 2 2.25000000
## 3 1.92857143
## 4 2.58876404
## 5 2.84444444
Since all the values of \(\rho\) are < 1, the system is stable, i.e., it will "work". The Exam Rooms and Treatment Rooms have the largest \(\rho\), both 0.8, and the closer \(\rho\) is to 1, the more congested a station is, so one of these two should receive another server. Of the two, Treatment Rooms has the larger steady-state average time in queue \(W_q\) (about 26.7 minutes versus 17.3 minutes), so Treatment Rooms should receive the additional server.
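As a rough supporting calculation (using the same M/M/c formulas as above; the value in the final comment comes from this formula, not from the original report), adding a third Treatment Room server would cut its utilization to about 0.53 and its queueing delay from roughly 26.7 minutes to roughly 3 minutes:
c_E2 <- 3
rho_E2 <- clinic$lambda[5]/(c_E2*clinic$mu[5])
n_E2 <- 0:(c_E2 - 1)
p_0_E2 <- 1/((c_E2*rho_E2)^c_E2/(factorial(c_E2)*(1 - rho_E2)) +
  sum((c_E2*rho_E2)^n_E2/factorial(n_E2)))
Lq_E2 <- ((rho_E2*(c_E2*rho_E2)^c_E2)*p_0_E2)/(factorial(c_E2)*(1 - rho_E2)^2)
Lq_E2/clinic$lambda[5]   # Wq with 3 servers: roughly 2.9 minutes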