## Analyses for a systematic review with both pairwise and network meta-analyses comparing the orotracheal intubation performance of different videolaryngoscopes and the Macintosh direct laryngoscope in adult patients

# Packages to be used
library(meta)
## Loading 'meta' package (version 4.12-0).
## Type 'help(meta)' for a brief overview.
library(dmetar)
## Extensive documentation for the dmetar package can be found at: 
##  www.bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/
library(dplyr)
## 
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
## 
##     filter, lag
## The following objects are masked from 'package:base':
## 
##     intersect, setdiff, setequal, union
library(metafor)
## Loading required package: Matrix
## Loading 'metafor' package (version 2.4-0). For an overview 
## and introduction to the package please type: help(metafor).
## 
## Attaching package: 'metafor'
## The following objects are masked from 'package:meta':
## 
##     baujat, forest, funnel, funnel.default, labbe, radial, trimfill
library(gemtc)
## Loading required package: coda
## 
## Attaching package: 'gemtc'
## The following object is masked from 'package:metafor':
## 
##     forest
## The following object is masked from 'package:dmetar':
## 
##     sucra
## The following object is masked from 'package:meta':
## 
##     forest
library(ggplot2)
library(igraph)
## 
## Attaching package: 'igraph'
## The following objects are masked from 'package:dplyr':
## 
##     as_data_frame, groups, union
## The following objects are masked from 'package:stats':
## 
##     decompose, spectrum
## The following object is masked from 'package:base':
## 
##     union
library(scales)
library(facetscales)
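#Note: dmetar and facetscales are not on CRAN (an assumption at the time of
#writing) and can be installed from GitHub, e.g.:
#devtools::install_github("MathiasHarrer/dmetar")
#devtools::install_github("zeehio/facetscales")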

# Pairwise analyses: all videolaryngoscopes (VLs) pooled against the Macintosh direct laryngoscope (Mac-DL)

Failed Intubation Attempts

fia<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Pairwise Failed Intubation.csv")
length(fia$fint.e1)
## [1] 120
#Number of comparisons with zero failed intubations in both arms
fia_zeros <- dplyr::filter(fia, fint.e1 == 0 & fint.e2 == 0)
length(fia_zeros$fint.e1)
## [1] 64
#Table for Meta-analysis of failed intubation
fia_analysis <- dplyr::filter(fia, fint.e1 > 0 | fint.e2 > 0)

#Number of comparisons and patients meta-analyzed for failed intubation
length(fia_analysis$fint.e1)
## [1] 56
sum(na.omit(fia_analysis$fint.t1),na.omit(fia_analysis$fint.t2))
## [1] 6396
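#Sanity check (a minimal sketch, not part of the original pipeline): the
#double-zero subset and the analysis subset should partition all 120 comparisons
stopifnot(nrow(fia_zeros) + nrow(fia_analysis) == nrow(fia))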
#Meta-analysis for failed intubation
mbin_fia_fixed <- meta::metabin(fint.e1, fint.t1, fint.e2, fint.t2, data = fia_analysis,
                                studlab = paste(author, year), comb.fixed = TRUE,
                                comb.random = FALSE, method.tau = "SJ", hakn = TRUE,
                                prediction = TRUE, incr = 0.5, sm = "RR")
mbin_fia_fixed
##                             RR             95%-CI %W(fixed)
## Abdallah 2011          10.7822 [0.6123; 189.8605]       0.2
## Abdelgalel 2018         0.1677 [0.0070;   4.0257]       0.6
## Altun 2018              0.3496 [0.0607;   2.0132]       1.2
## Ander 2017              0.0909 [0.0052;   1.5894]       1.7
## Andersen 2011           0.2000 [0.0098;   4.0624]       0.8
## Aoi 2010                1.0000 [0.0676;  14.7865]       0.3
## Arima 2014              4.7345 [0.2326;  96.3642]       0.2
## Aziz 2012               0.4718 [0.2387;   0.9327]       7.1
## Bakshi - NTI 2015       1.7500 [0.4169;   7.3460]       0.8
## Bakshi - NVL 2015       5.5965 [0.3319;  94.3792]       0.2
## Bakshi 2019             0.3333 [0.0140;   7.9235]       0.5
## Bhandari 2013           0.2000 [0.0099;   4.0371]       0.8
## Chalkeidis 2010         3.2000 [0.3787;  27.0413]       0.3
## Chandrashekaraiah 2017  0.3333 [0.0141;   7.8648]       0.5
## Dhonneur 2008           0.3333 [0.0137;   8.0906]       0.5
## Driver 2016             0.2306 [0.0502;   1.0587]       2.5
## Erden 2010              3.0000 [0.1315;  68.4178]       0.2
## Foulds 2016             0.0694 [0.0042;   1.1508]       2.2
## Gao 2018                0.7593 [0.2757;   2.0909]       2.4
## Hu 2017                 0.3167 [0.0131;   7.6805]       0.5
## Ilyas 2014             11.0000 [0.6209; 194.8652]       0.2
## Jungbauer 2009          0.1250 [0.0159;   0.9810]       2.4
## Kill 2013               0.1429 [0.0077;   2.6497]       1.1
## Kleine-Brueggeney 2017  0.3462 [0.2514;   0.4766]      23.9
## Koh 2010                0.2500 [0.0300;   2.0828]       1.2
## Lin 2012                0.6667 [0.1143;   3.8895]       0.9
## Liu 2016                2.0000 [0.1846;  21.6662]       0.3
## Liu 2019                0.0481 [0.0028;   0.8155]       3.2
## Maharaj 2007            0.3333 [0.0144;   7.7130]       0.5
## Maharaj 2008            0.1111 [0.0064;   1.9341]       1.4
## Malik 2008              0.5000 [0.0877;   2.8510]       0.9
## Malik1 2009             0.1250 [0.0147;   1.0604]       1.6
## Mcelwain 2011           0.2672 [0.0252;   2.8318]       0.8
## Myunghun-Kim 2017       3.0000 [0.1297;  69.4167]       0.2
## Inangil 2018            0.2000 [0.0100;   4.0192]       0.8
## Ndoko 2008              0.0769 [0.0044;   1.3317]       2.0
## Parasa 2016             2.9048 [0.1230;  68.5796]       0.2
## Peirovifar 2014         0.4000 [0.0876;   1.8256]       1.5
## Pournajafian 2014       2.0000 [0.5276;   7.5816]       0.9
## Ranieri 2012            0.1883 [0.0092;   3.8484]       0.8
## Risse 2020              0.1826 [0.0091;   3.6594]       0.8
## Ruetzeler 2020          0.3818 [0.0769;   1.8970]       1.6
## Shah 2016               0.3333 [0.0141;   7.8648]       0.5
## Shukla 2017             0.2000 [0.0099;   4.0371]       0.8
## Sun 2005                0.3333 [0.0137;   8.0852]       0.5
## Takenaka 2011           0.0883 [0.0051;   1.5379]       1.7
## Taylor 2013             0.0270 [0.0017;   0.4348]       5.7
## Walker 2009             3.0000 [0.1247;  72.1913]       0.2
## Yoo 2018                0.2000 [0.0254;   1.5756]       1.5
## Cavus 2011              0.0387 [0.0022;   0.6726]       2.6
## Enomoto 2008            0.0457 [0.0027;   0.7646]       3.4
## Zhao 2014               0.3649 [0.1828;   0.7281]       7.6
## Cordovani 2019          0.5000 [0.1359;   1.8393]       1.7
## Ferrando 2011           3.0000 [0.1271;  70.7833]       0.2
## Serocki 2013            0.0569 [0.0032;   1.0243]       1.8
## Serocki 2010            0.2500 [0.0478;   1.3075]       1.6
## 
## Number of studies combined: k = 56
## 
##                         RR           95%-CI      z  p-value
## Fixed effect model  0.3855 [0.3198; 0.4646] -10.00 < 0.0001
## Prediction interval        [0.0500; 3.3648]                
## 
## Quantifying heterogeneity:
##  tau^2 = 1.0714 [0.0000; 1.2285]; tau = 1.0351 [0.0000; 1.1084];
##  I^2 = 17.5% [0.0%; 41.3%]; H = 1.10 [1.00; 1.31]
## 
## Test of heterogeneity:
##      Q d.f. p-value
##  66.65   55  0.1349
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Continuity correction of 0.5 in studies with zero cell frequencies
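#Illustration (a hedged sketch, not part of the pipeline): with incr = 0.5,
#metabin adds 0.5 to every cell of a study containing a zero cell, so that
#study's risk ratio becomes ((e1+0.5)/(t1+1)) / ((e2+0.5)/(t2+1))
rr_cc <- function(e1, t1, e2, t2, incr = 0.5) {
  ((e1 + incr) / (t1 + 2 * incr)) / ((e2 + incr) / (t2 + 2 * incr))
}
rr_cc(0, 40, 2, 40)  #hypothetical zero-cell study: RR = 0.2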
mbin_fia_random <- meta::metabin(fint.e1, fint.t1, fint.e2, fint.t2, data = fia_analysis,
                                 studlab = paste(author, year), comb.fixed = FALSE,
                                 comb.random = TRUE, method.tau = "SJ", hakn = TRUE,
                                 prediction = TRUE, incr = 0.5, sm = "RR")
mbin_fia_random
##                             RR             95%-CI %W(random)
## Abdallah 2011          10.7822 [0.6123; 189.8605]        1.4
## Abdelgalel 2018         0.1677 [0.0070;   4.0257]        1.2
## Altun 2018              0.3496 [0.0607;   2.0132]        2.4
## Ander 2017              0.0909 [0.0052;   1.5894]        1.4
## Andersen 2011           0.2000 [0.0098;   4.0624]        1.3
## Aoi 2010                1.0000 [0.0676;  14.7865]        1.5
## Arima 2014              4.7345 [0.2326;  96.3642]        1.3
## Aziz 2012               0.4718 [0.2387;   0.9327]        3.7
## Bakshi - NTI 2015       1.7500 [0.4169;   7.3460]        2.8
## Bakshi - NVL 2015       5.5965 [0.3319;  94.3792]        1.4
## Bakshi 2019             0.3333 [0.0140;   7.9235]        1.2
## Bhandari 2013           0.2000 [0.0099;   4.0371]        1.3
## Chalkeidis 2010         3.2000 [0.3787;  27.0413]        2.0
## Chandrashekaraiah 2017  0.3333 [0.0141;   7.8648]        1.2
## Dhonneur 2008           0.3333 [0.0137;   8.0906]        1.2
## Driver 2016             0.2306 [0.0502;   1.0587]        2.6
## Erden 2010              3.0000 [0.1315;  68.4178]        1.2
## Foulds 2016             0.0694 [0.0042;   1.1508]        1.4
## Gao 2018                0.7593 [0.2757;   2.0909]        3.3
## Hu 2017                 0.3167 [0.0131;   7.6805]        1.2
## Ilyas 2014             11.0000 [0.6209; 194.8652]        1.4
## Jungbauer 2009          0.1250 [0.0159;   0.9810]        2.0
## Kill 2013               0.1429 [0.0077;   2.6497]        1.3
## Kleine-Brueggeney 2017  0.3462 [0.2514;   0.4766]        4.0
## Koh 2010                0.2500 [0.0300;   2.0828]        2.0
## Lin 2012                0.6667 [0.1143;   3.8895]        2.4
## Liu 2016                2.0000 [0.1846;  21.6662]        1.7
## Liu 2019                0.0481 [0.0028;   0.8155]        1.4
## Maharaj 2007            0.3333 [0.0144;   7.7130]        1.2
## Maharaj 2008            0.1111 [0.0064;   1.9341]        1.4
## Malik 2008              0.5000 [0.0877;   2.8510]        2.4
## Malik1 2009             0.1250 [0.0147;   1.0604]        2.0
## Mcelwain 2011           0.2672 [0.0252;   2.8318]        1.8
## Myunghun-Kim 2017       3.0000 [0.1297;  69.4167]        1.2
## Inangil 2018            0.2000 [0.0100;   4.0192]        1.3
## Ndoko 2008              0.0769 [0.0044;   1.3317]        1.4
## Parasa 2016             2.9048 [0.1230;  68.5796]        1.2
## Peirovifar 2014         0.4000 [0.0876;   1.8256]        2.7
## Pournajafian 2014       2.0000 [0.5276;   7.5816]        2.9
## Ranieri 2012            0.1883 [0.0092;   3.8484]        1.3
## Risse 2020              0.1826 [0.0091;   3.6594]        1.3
## Ruetzeler 2020          0.3818 [0.0769;   1.8970]        2.5
## Shah 2016               0.3333 [0.0141;   7.8648]        1.2
## Shukla 2017             0.2000 [0.0099;   4.0371]        1.3
## Sun 2005                0.3333 [0.0137;   8.0852]        1.2
## Takenaka 2011           0.0883 [0.0051;   1.5379]        1.4
## Taylor 2013             0.0270 [0.0017;   0.4348]        1.4
## Walker 2009             3.0000 [0.1247;  72.1913]        1.2
## Yoo 2018                0.2000 [0.0254;   1.5756]        2.0
## Cavus 2011              0.0387 [0.0022;   0.6726]        1.4
## Enomoto 2008            0.0457 [0.0027;   0.7646]        1.4
## Zhao 2014               0.3649 [0.1828;   0.7281]        3.7
## Cordovani 2019          0.5000 [0.1359;   1.8393]        2.9
## Ferrando 2011           3.0000 [0.1271;  70.7833]        1.2
## Serocki 2013            0.0569 [0.0032;   1.0243]        1.4
## Serocki 2010            0.2500 [0.0478;   1.3075]        2.5
## 
## Number of studies combined: k = 56
## 
##                          RR           95%-CI     t  p-value
## Random effects model 0.4100 [0.2881; 0.5833] -5.07 < 0.0001
## Prediction interval         [0.0500; 3.3648]               
## 
## Quantifying heterogeneity:
##  tau^2 = 1.0714 [0.0000; 1.2285]; tau = 1.0351 [0.0000; 1.1084];
##  I^2 = 17.5% [0.0%; 41.3%]; H = 1.10 [1.00; 1.31]
## 
## Test of heterogeneity:
##      Q d.f. p-value
##  66.65   55  0.1349
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
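#The pooled effects are stored on the log scale; exponentiating recovers the
#printed risk ratios (a minimal sketch)
exp(mbin_fia_random$TE.random)                                      #~0.41
exp(c(mbin_fia_random$lower.random, mbin_fia_random$upper.random))  #~[0.29; 0.58]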
#Estimated probability of failed intubation with Macintosh
meta::metaprop(event = fint.e2, n = fint.t2, studlab = paste(author, year), data = fia,
               method = "GLMM", sm = "PLOGIT", comb.fixed = FALSE, comb.random = TRUE,
               hakn = TRUE)
##                               proportion           95%-CI
## Abdallah 2011                     0.0000 [0.0000; 0.0725]
## Abdallah 2019                     0.0000 [0.0000; 0.1000]
## Abdelgalel 2018                   0.0250 [0.0006; 0.1316]
## Abdelgawad - Normotensos 2015     0.0000 [0.0000; 0.0881]
## Agrawal 2020                      0.0000 [0.0000; 0.0881]
## Ahmad 2016                        0.0000 [0.0000; 0.0377]
## Ahmad 2015                        0.0000 [0.0000; 0.1372]
## Akbar 2015                        0.0000 [0.0000; 0.0787]
## Al - Ghamdi 2016                  0.0000 [0.0000; 0.1544]
## Altun 2018                        0.0698 [0.0146; 0.1906]
## Ander 2017                        0.1282 [0.0430; 0.2743]
## Andersen 2011                     0.0400 [0.0049; 0.1371]
## Aoi 2010                          0.0556 [0.0014; 0.2729]
## Aqil 2016                         0.0000 [0.0000; 0.0881]
## Aqil 2017                         0.0000 [0.0000; 0.0513]
## Arici 2014                        0.0000 [0.0000; 0.0881]
## Arima 2014                        0.0000 [0.0000; 0.0672]
## Arslan 2017                       0.0000 [0.0000; 0.0881]
## Aziz 2012                         0.1565 [0.1018; 0.2255]
## Bakshi - NTI 2015                 0.1429 [0.0178; 0.4281]
## Bakshi - NVL 2015                 0.0000 [0.0000; 0.2316]
## Bakshi - ExXP 2015                0.0000 [0.0000; 0.2316]
## Bakshi 2019                       0.0270 [0.0007; 0.1416]
## Barak 2007                        0.0000 [0.0000; 0.0402]
## Barman 2017                       0.0000 [0.0000; 0.1000]
## Bashir 2020                       0.0000 [0.0000; 0.0881]
## Colak 2019                        0.0000 [0.0000; 0.0787]
## El - Tahan 2018                   0.0000 [0.0000; 0.1089]
## Huang 2020                        0.0000 [0.0000; 0.1157]
## Wasem 2013                        0.0000 [0.0000; 0.1157]
## Yao 2015                          0.0000 [0.0000; 0.0740]
## Bhalla 2018                       0.0000 [0.0000; 0.2180]
## Bhandari 2013                     0.0500 [0.0061; 0.1692]
## Bharti 2014                       0.0000 [0.0000; 0.1765]
## Bhat 2015                         0.0000 [0.0000; 0.0711]
## Bilehjani 2009                    0.0000 [0.0000; 0.0925]
## Blajic 2019                       0.0000 [0.0000; 0.0606]
## Çakir 2020                        0.0000 [0.0000; 0.1122]
## Carlino 2009                      0.0000 [0.0000; 0.2180]
## Chalkeidis 2010                   0.0357 [0.0009; 0.1835]
## Chandrashekaraiah 2017            0.0333 [0.0008; 0.1722]
## Dey 2020                          0.0000 [0.0000; 0.0330]
## Dhonneur 2008                     0.0094 [0.0002; 0.0514]
## Driver 2016                       0.0842 [0.0371; 0.1592]
## Erden 2010                        0.0000 [0.0000; 0.2059]
## Erturk 2015                       0.0000 [0.0000; 0.0881]
## Foulds 2016                       0.2800 [0.1207; 0.4939]
## Gao 2018                          0.0976 [0.0431; 0.1832]
## Griesdale 2012                    0.0000 [0.0000; 0.1684]
## Gunes 2020                        0.0000 [0.0000; 0.0402]
## Gupta 2020                        0.0000 [0.0000; 0.0881]
## Hirabayashi 2009                  0.0000 [0.0000; 0.0143]
## Hirabayashi 2010                  0.0000 [0.0000; 0.0362]
## Hosalli 2017                      0.0000 [0.0000; 0.1157]
## Hsu 2012                          0.0000 [0.0000; 0.1157]
## Hu 2017                           0.0105 [0.0003; 0.0573]
## Ilyas 2014                        0.0000 [0.0000; 0.0560]
## Jungbauer 2009                    0.0800 [0.0352; 0.1516]
## Kaur 2020                         0.0000 [0.0000; 0.0881]
## Kido 2015                         0.0000 [0.0000; 0.1372]
## Kill 2013                         0.1000 [0.0211; 0.2653]
## Kim 2013                          0.0000 [0.0000; 0.1482]
## Kleine-Brueggeney 2017            0.4333 [0.3432; 0.5269]
## Koh 2010                          0.1600 [0.0454; 0.3608]
## Küçükosman 2020                   0.0000 [0.0000; 0.1157]
## Laosuwan 2015                     0.0000 [0.0000; 0.2849]
## Lascarrou 2017                    0.0000 [0.0000; 0.0201]
## Lim 2005                          0.0000 [0.0000; 0.1157]
## Lin 2012                          0.0353 [0.0073; 0.0997]
## Liu 2014                          0.0000 [0.0000; 0.0881]
## Liu 2016                          0.0111 [0.0003; 0.0604]
## Liu 2019                          0.0552 [0.0268; 0.0993]
## Macke 2020                        0.0000 [0.0000; 0.0474]
## Maharaj 2006                      0.0000 [0.0000; 0.1157]
## Maharaj 2007                      0.0500 [0.0013; 0.2487]
## Maharaj 2008                      0.2000 [0.0573; 0.4366]
## Malik 2008                        0.0667 [0.0082; 0.2207]
## Malik1 2009                       0.1600 [0.0454; 0.3608]
## Malik2 2009                       0.0000 [0.0000; 0.1157]
## Maruyama 2008                     0.0000 [0.0000; 0.2646]
## Mcelwain 2011                     0.0645 [0.0079; 0.2142]
## Myunghun-Kim 2017                 0.0000 [0.0000; 0.1684]
## Kulkarni 2013                     0.0000 [0.0000; 0.1157]
## Inangil 2018                      0.0571 [0.0070; 0.1916]
## Ing 2017                          0.0000 [0.0000; 0.2059]
## Jafra 2018                        0.0000 [0.0000; 0.0362]
## Ndoko 2008                        0.1132 [0.0427; 0.2303]
## Ninan 2016                        0.0000 [0.0000; 0.1157]
## Parasa 2016                       0.0000 [0.0000; 0.1157]
## Pazur 2016                        0.0000 [0.0000; 0.1323]
## Peirovifar 2014                   0.2500 [0.0866; 0.4910]
## Pournajafian 2014                 0.0566 [0.0118; 0.1566]
## Ranieri 2012                      0.0312 [0.0038; 0.1084]
## Reena 2019                        0.0000 [0.0000; 0.0711]
## Risse 2020                        0.0645 [0.0079; 0.2142]
## Ruetzeler 2020                    0.0794 [0.0263; 0.1756]
## Sargin 2016                       0.0000 [0.0000; 0.0711]
## Shah 2016                         0.0333 [0.0008; 0.1722]
## Shukla 2017                       0.0500 [0.0061; 0.1692]
## Sulser 2016                       0.0000 [0.0000; 0.0493]
## Sun 2005                          0.0100 [0.0003; 0.0545]
## Takenaka 2011                     0.1471 [0.0495; 0.3106]
## Taylor 2013                       0.4091 [0.2634; 0.5675]
## Teoh 2010                         0.0000 [0.0000; 0.0362]
## Tsan 2020                         0.0000 [0.0000; 0.0521]
## Varsha 2019                       0.0000 [0.0000; 0.1000]
## Vijayakumar 2016                  0.0000 [0.0000; 0.0787]
## Walker 2009                       0.0000 [0.0000; 0.0596]
## Yoo 2018                          0.2273 [0.0782; 0.4537]
## Cavus 2011                        0.1200 [0.0453; 0.2431]
## El-Tahan 2017                     0.0000 [0.0000; 0.2316]
## Enomoto 2008                      0.1058 [0.0540; 0.1814]
## Paik 2020                         0.0000 [0.0000; 0.3085]
## Turkstra 2009                     0.0000 [0.0000; 0.2471]
## Zhao 2014                         0.3333 [0.2286; 0.4517]
## Cordovani 2019                    0.2500 [0.0866; 0.4910]
## Ferrando 2011                     0.0000 [0.0000; 0.1157]
## Serocki 2013                      0.1250 [0.0351; 0.2899]
## Serocki 2010                      0.1000 [0.0279; 0.2366]
## Arora 2013                        0.0000 [0.0000; 0.0660]
## 
## Number of studies combined: k = 120
## 
##                      proportion           95%-CI
## Random effects model     0.0086 [0.0046; 0.0162]
## 
## Quantifying heterogeneity:
##  tau^2 = 4.8180; tau = 2.1950; I^2 = 90.7%; H = 3.27
## 
## Test of heterogeneity:
##       Q d.f.  p-value             Test
##  219.02  119 < 0.0001        Wald-type
##  744.72  119 < 0.0001 Likelihood-Ratio
## 
## Details on meta-analytical method:
## - Random intercept logistic regression model
## - Maximum-likelihood estimator for tau^2
## - Hartung-Knapp adjustment for random effects model
## - Logit transformation
## - Clopper-Pearson confidence interval for individual studies
## - Continuity correction of 0.5 in studies with zero cell frequencies
##   (only used to calculate individual study results)
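#Back-transforming the pooled logit by hand (a minimal sketch; metaprop already
#reports the proportion scale). The call above is unassigned, so store it first:
mp_fia <- meta::metaprop(event = fint.e2, n = fint.t2, studlab = paste(author, year),
                         data = fia, method = "GLMM", sm = "PLOGIT",
                         comb.fixed = FALSE, comb.random = TRUE, hakn = TRUE)
plogis(mp_fia$TE.random)  #inverse logit of the pooled estimate, ~0.0086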
#Forest plot for failed intubation
meta::forest(mbin_fia_random, sortvar = TE, lab.e = "Videolaryngoscopes", lab.c = "Macintosh",
             col.study = "black", col.square = "black", col.diamond = "blue")

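#Writing the forest plot to a file (a sketch; the file name is illustrative)
pdf("forest_failed_intubation.pdf", width = 10, height = 16)
meta::forest(mbin_fia_random, sortvar = TE, lab.e = "Videolaryngoscopes", lab.c = "Macintosh",
             col.study = "black", col.square = "black", col.diamond = "blue")
dev.off()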

#Detecting Outliers for failed intubation
dmetar::find.outliers(mbin_fia_random)
## Identified outliers (random-effects model) 
## ------------------------------------------ 
## "Abdallah 2011", "Ilyas 2014" 
##  
## Results with outliers removed 
## ----------------------------- 
##                             RR             95%-CI %W(random) exclude
## Abdallah 2011          10.7822 [0.6123; 189.8605]        0.0       *
## Abdelgalel 2018         0.1677 [0.0070;   4.0257]        1.2        
## Altun 2018              0.3496 [0.0607;   2.0132]        2.5        
## Ander 2017              0.0909 [0.0052;   1.5894]        1.4        
## Andersen 2011           0.2000 [0.0098;   4.0624]        1.3        
## Aoi 2010                1.0000 [0.0676;  14.7865]        1.5        
## Arima 2014              4.7345 [0.2326;  96.3642]        1.3        
## Aziz 2012               0.4718 [0.2387;   0.9327]        4.2        
## Bakshi - NTI 2015       1.7500 [0.4169;   7.3460]        2.9        
## Bakshi - NVL 2015       5.5965 [0.3319;  94.3792]        1.4        
## Bakshi 2019             0.3333 [0.0140;   7.9235]        1.2        
## Bhandari 2013           0.2000 [0.0099;   4.0371]        1.3        
## Chalkeidis 2010         3.2000 [0.3787;  27.0413]        2.0        
## Chandrashekaraiah 2017  0.3333 [0.0141;   7.8648]        1.2        
## Dhonneur 2008           0.3333 [0.0137;   8.0906]        1.2        
## Driver 2016             0.2306 [0.0502;   1.0587]        2.8        
## Erden 2010              3.0000 [0.1315;  68.4178]        1.2        
## Foulds 2016             0.0694 [0.0042;   1.1508]        1.4        
## Gao 2018                0.7593 [0.2757;   2.0909]        3.7        
## Hu 2017                 0.3167 [0.0131;   7.6805]        1.2        
## Ilyas 2014             11.0000 [0.6209; 194.8652]        0.0       *
## Jungbauer 2009          0.1250 [0.0159;   0.9810]        2.1        
## Kill 2013               0.1429 [0.0077;   2.6497]        1.3        
## Kleine-Brueggeney 2017  0.3462 [0.2514;   0.4766]        4.7        
## Koh 2010                0.2500 [0.0300;   2.0828]        2.0        
## Lin 2012                0.6667 [0.1143;   3.8895]        2.5        
## Liu 2016                2.0000 [0.1846;  21.6662]        1.7        
## Liu 2019                0.0481 [0.0028;   0.8155]        1.4        
## Maharaj 2007            0.3333 [0.0144;   7.7130]        1.2        
## Maharaj 2008            0.1111 [0.0064;   1.9341]        1.4        
## Malik 2008              0.5000 [0.0877;   2.8510]        2.5        
## Malik1 2009             0.1250 [0.0147;   1.0604]        2.0        
## Mcelwain 2011           0.2672 [0.0252;   2.8318]        1.8        
## Myunghun-Kim 2017       3.0000 [0.1297;  69.4167]        1.2        
## Inangil 2018            0.2000 [0.0100;   4.0192]        1.3        
## Ndoko 2008              0.0769 [0.0044;   1.3317]        1.4        
## Parasa 2016             2.9048 [0.1230;  68.5796]        1.2        
## Peirovifar 2014         0.4000 [0.0876;   1.8256]        2.8        
## Pournajafian 2014       2.0000 [0.5276;   7.5816]        3.1        
## Ranieri 2012            0.1883 [0.0092;   3.8484]        1.3        
## Risse 2020              0.1826 [0.0091;   3.6594]        1.3        
## Ruetzeler 2020          0.3818 [0.0769;   1.8970]        2.7        
## Shah 2016               0.3333 [0.0141;   7.8648]        1.2        
## Shukla 2017             0.2000 [0.0099;   4.0371]        1.3        
## Sun 2005                0.3333 [0.0137;   8.0852]        1.2        
## Takenaka 2011           0.0883 [0.0051;   1.5379]        1.4        
## Taylor 2013             0.0270 [0.0017;   0.4348]        1.4        
## Walker 2009             3.0000 [0.1247;  72.1913]        1.2        
## Yoo 2018                0.2000 [0.0254;   1.5756]        2.1        
## Cavus 2011              0.0387 [0.0022;   0.6726]        1.4        
## Enomoto 2008            0.0457 [0.0027;   0.7646]        1.4        
## Zhao 2014               0.3649 [0.1828;   0.7281]        4.2        
## Cordovani 2019          0.5000 [0.1359;   1.8393]        3.2        
## Ferrando 2011           3.0000 [0.1271;  70.7833]        1.2        
## Serocki 2013            0.0569 [0.0032;   1.0243]        1.3        
## Serocki 2010            0.2500 [0.0478;   1.3075]        2.6        
## 
## Number of studies combined: k = 54
## 
##                          RR           95%-CI     t  p-value
## Random effects model 0.3777 [0.2729; 0.5229] -6.00 < 0.0001
## Prediction interval         [0.0591; 2.4135]               
## 
## Quantifying heterogeneity:
##  tau^2 = 0.8280 [0.0000; 0.8202]; tau = 0.9099 [0.0000; 0.9056];
##  I^2 = 6.0% [0.0%; 32.2%]; H = 1.03 [1.00; 1.21]
## 
## Test of heterogeneity:
##      Q d.f. p-value
##  56.41   53  0.3486
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
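#Reproducing the outlier-removed pool by hand (a sketch): refit the
#random-effects model without the two flagged trials
meta::metabin(fint.e1, fint.t1, fint.e2, fint.t2,
              data = dplyr::filter(fia_analysis,
                                   !paste(author, year) %in% c("Abdallah 2011", "Ilyas 2014")),
              studlab = paste(author, year), comb.fixed = FALSE, comb.random = TRUE,
              method.tau = "SJ", hakn = TRUE, prediction = TRUE, incr = 0.5, sm = "RR")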
#Influence Analysis for failed intubation
inf_analysis_fia<-dmetar::InfluenceAnalysis(mbin_fia_random,random = TRUE)
## [===========================================================================] DONE
plot(inf_analysis_fia,"baujat")
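#Leave-one-out sensitivity (a sketch): metainf refits the pooled estimate
#omitting one study at a time
meta::metainf(mbin_fia_random, pooled = "random")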

#Meta-regression for failed intubation

#Controlling for population characteristics (general, obese, neck immobilization, pregnant women, elderly)
meta::metareg(mbin_fia_random,~population)
## Warning in rma.uni(yi = TE[!exclude], sei = seTE[!exclude], data = dataset, :
## Redundant predictors dropped from the model.
## 
## Mixed-Effects Model (k = 56; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     1.0838 (SE = 0.2926)
## tau (square root of estimated tau^2 value):             1.0410
## I^2 (residual heterogeneity / unaccounted variability): 54.12%
## H^2 (unaccounted variability / sampling variability):   2.18
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 53) = 63.5771, p-val = 0.1516
## 
## Test of Moderators (coefficients 2:3):
## F(df1 = 2, df2 = 53) = 0.7132, p-val = 0.4947
## 
## Model Results:
## 
##                                estimate      se     tval    pval    ci.lb 
## intrcpt                         -1.1074  0.5466  -2.0261  0.0478  -2.2037 
## populationGeneral                0.3526  0.5863   0.6013  0.5502  -0.8234 
## populationNeck Immobilization   -0.1514  0.6757  -0.2241  0.8235  -1.5066 
##                                  ci.ub 
## intrcpt                        -0.0111  * 
## populationGeneral               1.5286    
## populationNeck Immobilization   1.2038    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
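#The same moderator viewed as a subgroup analysis (a sketch, assuming meta's
#'byvar' argument is available for update())
update(mbin_fia_random, byvar = population)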
#Controlling for operator experience
meta::metareg(mbin_fia_random,~experience)
## 
## Mixed-Effects Model (k = 56; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.8471 (SE = 0.2673)
## tau (square root of estimated tau^2 value):             0.9204
## I^2 (residual heterogeneity / unaccounted variability): 51.11%
## H^2 (unaccounted variability / sampling variability):   2.05
## R^2 (amount of heterogeneity accounted for):            20.94%
## 
## Test for Residual Heterogeneity:
## QE(df = 53) = 54.3949, p-val = 0.4211
## 
## Test of Moderators (coefficients 2:3):
## F(df1 = 2, df2 = 53) = 7.8948, p-val = 0.0010
## 
## Model Results:
## 
##                            estimate      se     tval    pval    ci.lb    ci.ub 
## intrcpt                     -2.2446  0.4826  -4.6507  <.0001  -3.2126  -1.2765 
## experienceExperienced        1.1858  0.5247   2.2601  0.0280   0.1334   2.2382 
## experienceNon-experienced    2.0802  0.5529   3.7626  0.0004   0.9713   3.1890 
##  
## intrcpt                    *** 
## experienceExperienced        * 
## experienceNon-experienced  *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
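#Reading the experience model on the risk-ratio scale (a minimal sketch): each
#level's pooled RR is exp(intercept + coefficient); the reference level is the
#blank (experience not reported) category
exp(-2.2446)           #reference, experience not reported: RR ~ 0.11
exp(-2.2446 + 1.1858)  #experienced operators: RR ~ 0.35
exp(-2.2446 + 2.0802)  #non-experienced operators: RR ~ 0.85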
#Controlling for intubation technique (regular, rapid sequence induction)
meta::metareg(mbin_fia_random,~technique)
## 
## Mixed-Effects Model (k = 56; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     1.0640 (SE = 0.2939)
## tau (square root of estimated tau^2 value):             1.0315
## I^2 (residual heterogeneity / unaccounted variability): 59.43%
## H^2 (unaccounted variability / sampling variability):   2.46
## R^2 (amount of heterogeneity accounted for):            0.69%
## 
## Test for Residual Heterogeneity:
## QE(df = 52) = 63.6573, p-val = 0.1289
## 
## Test of Moderators (coefficients 2:4):
## F(df1 = 3, df2 = 52) = 0.9673, p-val = 0.4152
## 
## Model Results:
## 
##                                            estimate      se     tval    pval 
## intrcpt                                      0.0559  0.5991   0.0934  0.9260 
## techniqueRapid Sequence Induction           -1.3456  0.9852  -1.3658  0.1779 
## techniqueRegular                            -1.0189  0.6293  -1.6191  0.1115 
## techniqueRegular/Rapid Sequence Induction   -1.0187  1.2548  -0.8119  0.4206 
##                                              ci.lb   ci.ub 
## intrcpt                                    -1.1463  1.2582    
## techniqueRapid Sequence Induction          -3.3225  0.6314    
## techniqueRegular                           -2.2816  0.2438    
## techniqueRegular/Rapid Sequence Induction  -3.5367  1.4992    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for setting (operating room, out of hospital, etc.)
meta::metareg(mbin_fia_random,~set)
## Warning in rma.uni(yi = TE[!exclude], sei = seTE[!exclude], data = dataset, :
## Redundant predictors dropped from the model.
## 
## Mixed-Effects Model (k = 56; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     1.0734 (SE = 0.2951)
## tau (square root of estimated tau^2 value):             1.0361
## I^2 (residual heterogeneity / unaccounted variability): 59.56%
## H^2 (unaccounted variability / sampling variability):   2.47
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 52) = 62.5007, p-val = 0.1511
## 
## Test of Moderators (coefficients 2:4):
## F(df1 = 3, df2 = 52) = 0.9541, p-val = 0.4214
## 
## Model Results:
## 
##                     estimate      se     tval    pval    ci.lb   ci.ub 
## intrcpt              -1.4671  1.0836  -1.3540  0.1816  -3.6415  0.7072    
## setICU                0.7903  1.3648   0.5791  0.5650  -1.9483  3.5290    
## setOperating Room     0.5470  1.0991   0.4976  0.6208  -1.6585  2.7525    
## setOut of Hospital    3.0220  1.8918   1.5975  0.1162  -0.7741  6.8181    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for nature of procedure (elective, urgent)
meta::metareg(mbin_fia_random,~nature)
## Warning in rma.uni(yi = TE[!exclude], sei = seTE[!exclude], data = dataset, :
## Redundant predictors dropped from the model.
## 
## Mixed-Effects Model (k = 56; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     1.0851 (SE = 0.2904)
## tau (square root of estimated tau^2 value):             1.0417
## I^2 (residual heterogeneity / unaccounted variability): 59.83%
## H^2 (unaccounted variability / sampling variability):   2.49
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 54) = 65.8408, p-val = 0.1296
## 
## Test of Moderators (coefficient 2):
## F(df1 = 1, df2 = 54) = 0.2803, p-val = 0.5987
## 
## Model Results:
## 
##                 estimate      se     tval    pval    ci.lb   ci.ub 
## intrcpt          -0.5828  0.6103  -0.9548  0.3439  -1.8064  0.6409    
## natureElective   -0.3377  0.6378  -0.5295  0.5987  -1.6165  0.9411    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for inducer used
meta::metareg(mbin_fia_random,~inducer)
## Warning in rma.uni(yi = TE[!exclude], sei = seTE[!exclude], data = dataset, :
## Redundant predictors dropped from the model.
## 
## Mixed-Effects Model (k = 56; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     1.0536 (SE = 0.2997)
## tau (square root of estimated tau^2 value):             1.0264
## I^2 (residual heterogeneity / unaccounted variability): 58.42%
## H^2 (unaccounted variability / sampling variability):   2.41
## R^2 (amount of heterogeneity accounted for):            1.67%
## 
## Test for Residual Heterogeneity:
## QE(df = 49) = 56.9979, p-val = 0.2021
## 
## Test of Moderators (coefficients 2:7):
## F(df1 = 6, df2 = 49) = 1.1187, p-val = 0.3653
## 
## Model Results:
## 
##                            estimate      se     tval    pval    ci.lb   ci.ub 
## intrcpt                     -0.2870  0.3938  -0.7289  0.4696  -1.0784  0.5043 
## inducerEtomidate            -2.7464  1.5271  -1.7984  0.0783  -5.8153  0.3224 
## inducerMidazolam            -1.3224  1.5850  -0.8344  0.4081  -4.5075  1.8627 
## inducerPropofol             -0.7755  0.4467  -1.7359  0.0889  -1.6732  0.1223 
## inducerPropofol/Ketamine    -1.4985  1.6464  -0.9102  0.3672  -4.8070  1.8099 
## inducerPropofol/Midazolam    0.2775  1.1962   0.2319  0.8175  -2.1265  2.6814 
## inducerThiopental           -0.1037  0.8436  -0.1229  0.9027  -1.7990  1.5917 
##  
## intrcpt 
## inducerEtomidate           . 
## inducerMidazolam 
## inducerPropofol            . 
## inducerPropofol/Ketamine 
## inducerPropofol/Midazolam 
## inducerThiopental 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for opioid used
meta::metareg(mbin_fia_random,~opioid)
## Warning in rma.uni(yi = TE[!exclude], sei = seTE[!exclude], data = dataset, :
## Redundant predictors dropped from the model.
## 
## Mixed-Effects Model (k = 56; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.9127 (SE = 0.2792)
## tau (square root of estimated tau^2 value):             0.9553
## I^2 (residual heterogeneity / unaccounted variability): 53.68%
## H^2 (unaccounted variability / sampling variability):   2.16
## R^2 (amount of heterogeneity accounted for):            14.82%
## 
## Test for Residual Heterogeneity:
## QE(df = 51) = 53.4212, p-val = 0.3814
## 
## Test of Moderators (coefficients 2:5):
## F(df1 = 4, df2 = 51) = 3.4447, p-val = 0.0144
## 
## Model Results:
## 
##                             estimate      se     tval    pval    ci.lb    ci.ub 
## intrcpt                      -0.4999  0.2950  -1.6945  0.0963  -1.0922   0.0924 
## opioidFentanyl               -0.1434  0.3755  -0.3818  0.7042  -0.8972   0.6105 
## opioidFentanyl/Sulfentanil   -3.1110  1.3868  -2.2433  0.0292  -5.8950  -0.3269 
## opioidRemifentanil           -1.2238  0.5087  -2.4059  0.0198  -2.2449  -0.2026 
## opioidSulfentanil            -1.4656  0.7049  -2.0791  0.0427  -2.8808  -0.0504 
##  
## intrcpt                     . 
## opioidFentanyl 
## opioidFentanyl/Sulfentanil  * 
## opioidRemifentanil          * 
## opioidSulfentanil           * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for neuromuscular blocking agent used
meta::metareg(mbin_fia_random,~blocker)
## Warning in rma.uni(yi = TE[!exclude], sei = seTE[!exclude], data = dataset, :
## Redundant predictors dropped from the model.
## 
## Mixed-Effects Model (k = 56; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.9153 (SE = 0.2851)
## tau (square root of estimated tau^2 value):             0.9567
## I^2 (residual heterogeneity / unaccounted variability): 52.69%
## H^2 (unaccounted variability / sampling variability):   2.11
## R^2 (amount of heterogeneity accounted for):            14.57%
## 
## Test for Residual Heterogeneity:
## QE(df = 49) = 50.1363, p-val = 0.4281
## 
## Test of Moderators (coefficients 2:7):
## F(df1 = 6, df2 = 49) = 2.5608, p-val = 0.0309
## 
## Model Results:
## 
##                               estimate      se     tval    pval    ci.lb 
## intrcpt                        -0.4209  0.3366  -1.2505  0.2171  -1.0972 
## blockerAtracurium              -0.4488  0.5359  -0.8374  0.4064  -1.5258 
## blockerCisatracurium            0.6110  0.7093   0.8615  0.3932  -0.8143 
## blockerRocuronium              -1.0261  0.4317  -2.3766  0.0214  -1.8937 
## blockerRocuronium/Vecuronium   -0.6777  1.5306  -0.4428  0.6599  -3.7536 
## blockerSuccinylcholine         -1.3977  0.6526  -2.1418  0.0372  -2.7091 
## blockerVecuronium               0.4417  0.5973   0.7395  0.4631  -0.7586 
##                                 ci.ub 
## intrcpt                        0.2555    
## blockerAtracurium              0.6282    
## blockerCisatracurium           2.0364    
## blockerRocuronium             -0.1585  * 
## blockerRocuronium/Vecuronium   2.3981    
## blockerSuccinylcholine        -0.0863  * 
## blockerVecuronium              1.6421    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Multiple Meta-regression for failed intubation
str(fia)
## 'data.frame':    120 obs. of  18 variables:
##  $ id          : int  1 2 3 4 6 7 8 11 14 18 ...
##  $ author      : Factor w/ 111 levels "Çakir","Abdallah",..: 2 2 3 4 5 6 6 7 8 9 ...
##  $ year        : int  2011 2019 2018 2015 2020 2016 2015 2015 2016 2018 ...
##  $ population  : Factor w/ 5 levels "Elderly","General",..: 4 2 2 2 3 3 2 3 2 2 ...
##  $ predicted   : Factor w/ 3 levels "","Difficult",..: 2 3 2 3 2 2 3 2 3 3 ...
##  $ set         : Factor w/ 5 levels "Emergency department",..: 4 4 2 4 4 4 4 4 4 4 ...
##  $ nature      : Factor w/ 3 levels "","Elective",..: 2 2 3 2 2 2 2 2 2 2 ...
##  $ technique   : Factor w/ 4 levels "","Rapid Sequence Induction",..: 1 3 2 3 3 1 3 3 3 3 ...
##  $ experience  : Factor w/ 3 levels "","Experienced",..: 3 2 2 2 2 2 2 2 3 2 ...
##  $ intervention: Factor w/ 1 level "Videolaryngoscope": 1 1 1 1 1 1 1 1 1 1 ...
##  $ reference   : Factor w/ 1 level "Macintosh": 1 1 1 1 1 1 1 1 1 1 ...
##  $ inducer     : Factor w/ 8 levels "","Etomidate",..: 1 4 5 4 4 1 4 4 4 4 ...
##  $ opioid      : Factor w/ 6 levels "","Fentanyl",..: 1 2 2 2 2 1 1 2 5 2 ...
##  $ blocker     : Factor w/ 8 levels "","Atracurium",..: 1 3 4 4 8 1 4 4 4 4 ...
##  $ fint.e1     : int  5 0 0 0 0 0 0 0 0 2 ...
##  $ fint.t1     : int  50 35 80 40 40 78 25 45 64 82 ...
##  $ fint.e2     : int  0 0 1 0 0 0 0 0 0 3 ...
##  $ fint.t2     : int  49 35 40 40 40 96 25 45 22 43 ...
model_fia <- metafor::rma.uni(ai = fint.e1, n1i = fint.t1, ci = fint.e2, n2i = fint.t2,
                              data = fia, method = "ML", measure = "RR",
                              mods = ~experience + technique + population + set +
                                nature + inducer + opioid + blocker,
                              test = "knha")
model_fia
## 
## Mixed-Effects Model (k = 120; tau^2 estimator: ML)
## 
## tau^2 (estimated amount of residual heterogeneity):     0 (SE = 0.0354)
## tau (square root of estimated tau^2 value):             0
## I^2 (residual heterogeneity / unaccounted variability): 0.00%
## H^2 (unaccounted variability / sampling variability):   1.00
## R^2 (amount of heterogeneity accounted for):            100.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 85) = 38.2241, p-val = 1.0000
## 
## Test of Moderators (coefficients 2:35):
## F(df1 = 34, df2 = 85) = 2.5568, p-val = 0.0003
## 
## Model Results:
## 
##                                            estimate      se     tval    pval 
## intrcpt                                     -2.6134  2.2892  -1.1416  0.2568 
## experienceExperienced                        0.4271  0.3569   1.1967  0.2348 
## experienceNon-experienced                    0.7158  0.3670   1.9503  0.0544 
## techniqueRapid Sequence Induction           -1.5544  0.7316  -2.1246  0.0365 
## techniqueRegular                            -1.7240  0.5911  -2.9164  0.0045 
## techniqueRegular/Rapid Sequence Induction   -1.7341  0.8695  -1.9944  0.0493 
## populationGeneral                           -0.9036  1.3489  -0.6699  0.5048 
## populationNeck Immobilization               -1.0461  1.3378  -0.7819  0.4364 
## populationObese                             -0.9527  1.4335  -0.6646  0.5081 
## populationPregnant Women                    -0.1311  1.7407  -0.0753  0.9401 
## setICU                                       1.4652  0.6488   2.2583  0.0265 
## setMultiple                                  1.4671  1.4240   1.0303  0.3058 
## setOperating Room                            2.9901  1.6520   1.8100  0.0738 
## setOut of Hospital                           3.4702  1.0278   3.3763  0.0011 
## natureElective                               0.9412  0.9800   0.9604  0.3396 
## natureUrgent                                 1.3341  1.7637   0.7564  0.4515 
## inducerEtomidate                            -2.7294  1.3209  -2.0663  0.0418 
## inducerMidazolam                            -1.0232  1.0618  -0.9636  0.3380 
## inducerPropofol                             -0.7507  0.7649  -0.9815  0.3291 
## inducerPropofol/Ketamine                    -0.5165  1.2447  -0.4150  0.6792 
## inducerPropofol/Midazolam                   -0.5014  0.9648  -0.5197  0.6046 
## inducerPropofol/Thipental                   -0.8701  1.5733  -0.5530  0.5817 
## inducerThiopental                            0.4869  0.7786   0.6254  0.5334 
## opioidFentanyl                               0.7017  0.4231   1.6583  0.1010 
## opioidFentanyl/Sulfentanil                  -1.4360  1.0722  -1.3393  0.1840 
## opioidMorphine                               0.5823  1.4238   0.4090  0.6836 
## opioidRemifentanil                           0.2004  0.4807   0.4170  0.6778 
## opioidSulfentanil                            0.5505  0.6402   0.8598  0.3923 
## blockerAtracurium                            0.2983  0.6711   0.4444  0.6579 
## blockerCisatracurium                         1.0045  0.7001   1.4347  0.1551 
## blockerRocuronium                            0.0281  0.6149   0.0457  0.9637 
## blockerRocuronium/Vecuronium                -0.4557  1.2557  -0.3629  0.7176 
## blockerSuccinylcholine                      -0.3668  0.7219  -0.5081  0.6127 
## blockerSuccinylcholine/Rocuronium            3.4652  2.1734   1.5944  0.1146 
## blockerVecuronium                            1.0510  0.6544   1.6062  0.1119 
##                                              ci.lb    ci.ub 
## intrcpt                                    -7.1650   1.9381     
## experienceExperienced                      -0.2825   1.1367     
## experienceNon-experienced                  -0.0139   1.4456   . 
## techniqueRapid Sequence Induction          -3.0090  -0.0998   * 
## techniqueRegular                           -2.8994  -0.5487  ** 
## techniqueRegular/Rapid Sequence Induction  -3.4628  -0.0054   * 
## populationGeneral                          -3.5856   1.7784     
## populationNeck Immobilization              -3.7060   1.6138     
## populationObese                            -3.8030   1.8975     
## populationPregnant Women                   -3.5921   3.3299     
## setICU                                      0.1752   2.7553   * 
## setMultiple                                -1.3642   4.2985     
## setOperating Room                          -0.2946   6.2748   . 
## setOut of Hospital                          1.4266   5.5137  ** 
## natureElective                             -1.0073   2.8896     
## natureUrgent                               -2.1727   4.8408     
## inducerEtomidate                           -5.3557  -0.1031   * 
## inducerMidazolam                           -3.1344   1.0880     
## inducerPropofol                            -2.2715   0.7701     
## inducerPropofol/Ketamine                   -2.9913   1.9583     
## inducerPropofol/Midazolam                  -2.4196   1.4168     
## inducerPropofol/Thipental                  -3.9981   2.2580     
## inducerThiopental                          -1.0611   2.0349     
## opioidFentanyl                             -0.1396   1.5430     
## opioidFentanyl/Sulfentanil                 -3.5679   0.6958     
## opioidMorphine                             -2.2486   3.4133     
## opioidRemifentanil                         -0.7554   1.1563     
## opioidSulfentanil                          -0.7225   1.8234     
## blockerAtracurium                          -1.0361   1.6327     
## blockerCisatracurium                       -0.3876   2.3965     
## blockerRocuronium                          -1.1945   1.2507     
## blockerRocuronium/Vecuronium               -2.9523   2.0409     
## blockerSuccinylcholine                     -1.8020   1.0685     
## blockerSuccinylcholine/Rocuronium          -0.8561   7.7865     
## blockerVecuronium                          -0.2500   2.3521     
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
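#Gauging the joint contribution of one moderator block (a sketch): refit without
#'blocker' and compare the nested ML fits with a likelihood-ratio test
model_fia_red <- metafor::rma.uni(ai = fint.e1, n1i = fint.t1, ci = fint.e2, n2i = fint.t2,
                                  data = fia, method = "ML", measure = "RR",
                                  mods = ~experience + technique + population + set +
                                    nature + inducer + opioid,
                                  test = "knha")
anova(model_fia, model_fia_red)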
metafor::permutest(model_fia)
## Running 1000 iterations for approximate permutation test.
  |=======================================                               |  55%
  |                                                                            
  |=======================================                               |  56%
  |                                                                            
  |========================================                              |  56%
  |                                                                            
  |========================================                              |  57%
  |                                                                            
  |========================================                              |  58%
  |                                                                            
  |=========================================                             |  58%
  |                                                                            
  |=========================================                             |  59%
  |                                                                            
  |==========================================                            |  59%
  |                                                                            
  |==========================================                            |  60%
  |                                                                            
  |==========================================                            |  61%
  |                                                                            
  |===========================================                           |  61%
  |                                                                            
  |===========================================                           |  62%
  |                                                                            
  |============================================                          |  62%
  |                                                                            
  |============================================                          |  63%
  |                                                                            
  |============================================                          |  64%
  |                                                                            
  |=============================================                         |  64%
  |                                                                            
  |=============================================                         |  65%
  |                                                                            
  |==============================================                        |  65%
  |                                                                            
  |==============================================                        |  66%
  |                                                                            
  |===============================================                       |  66%
  |                                                                            
  |===============================================                       |  67%
  |                                                                            
  |===============================================                       |  68%
  |                                                                            
  |================================================                      |  68%
  |                                                                            
  |================================================                      |  69%
  |                                                                            
  |=================================================                     |  69%
  |                                                                            
  |=================================================                     |  70%
  |                                                                            
  |=================================================                     |  71%
  |                                                                            
  |==================================================                    |  71%
  |                                                                            
  |==================================================                    |  72%
  |                                                                            
  |===================================================                   |  72%
  |                                                                            
  |===================================================                   |  73%
  |                                                                            
  |===================================================                   |  74%
  |                                                                            
  |====================================================                  |  74%
  |                                                                            
  |====================================================                  |  75%
  |                                                                            
  |=====================================================                 |  75%
  |                                                                            
  |=====================================================                 |  76%
  |                                                                            
  |======================================================                |  76%
  |                                                                            
  |======================================================                |  77%
  |                                                                            
  |======================================================                |  78%
  |                                                                            
  |=======================================================               |  78%
  |                                                                            
  |=======================================================               |  79%
  |                                                                            
  |========================================================              |  79%
  |                                                                            
  |========================================================              |  80%
  |                                                                            
  |========================================================              |  81%
  |                                                                            
  |=========================================================             |  81%
  |                                                                            
  |=========================================================             |  82%
  |                                                                            
  |==========================================================            |  82%
  |                                                                            
  |==========================================================            |  83%
  |                                                                            
  |==========================================================            |  84%
  |                                                                            
  |===========================================================           |  84%
  |                                                                            
  |===========================================================           |  85%
  |                                                                            
  |============================================================          |  85%
  |                                                                            
  |============================================================          |  86%
  |                                                                            
  |=============================================================         |  86%
  |                                                                            
  |=============================================================         |  87%
  |                                                                            
  |=============================================================         |  88%
  |                                                                            
  |==============================================================        |  88%
  |                                                                            
  |==============================================================        |  89%
  |                                                                            
  |===============================================================       |  89%
  |                                                                            
  |===============================================================       |  90%
  |                                                                            
  |===============================================================       |  91%
  |                                                                            
  |================================================================      |  91%
  |                                                                            
  |================================================================      |  92%
  |                                                                            
  |=================================================================     |  92%
  |                                                                            
  |=================================================================     |  93%
  |                                                                            
  |=================================================================     |  94%
  |                                                                            
  |==================================================================    |  94%
  |                                                                            
  |==================================================================    |  95%
  |                                                                            
  |===================================================================   |  95%
  |                                                                            
  |===================================================================   |  96%
  |                                                                            
  |====================================================================  |  96%
  |                                                                            
  |====================================================================  |  97%
  |                                                                            
  |====================================================================  |  98%
  |                                                                            
  |===================================================================== |  98%
  |                                                                            
  |===================================================================== |  99%
  |                                                                            
  |======================================================================|  99%
  |                                                                            
  |======================================================================| 100%
## 
## Test of Moderators (coefficients 2:35):
## F(df1 = 34, df2 = 85) = 2.5568, p-val* = 0.0180
## 
## Model Results:
## 
##                                            estimate      se     tval   pval* 
## intrcpt                                     -2.6134  2.2892  -1.1416  0.2140 
## experienceExperienced                        0.4271  0.3569   1.1967  0.2940 
## experienceNon-experienced                    0.7158  0.3670   1.9503  0.1030 
## techniqueRapid Sequence Induction           -1.5544  0.7316  -2.1246  0.0570 
## techniqueRegular                            -1.7240  0.5911  -2.9164  0.0120 
## techniqueRegular/Rapid Sequence Induction   -1.7341  0.8695  -1.9944  0.0880 
## populationGeneral                           -0.9036  1.3489  -0.6699  0.3870 
## populationNeck Immobilization               -1.0461  1.3378  -0.7819  0.3530 
## populationObese                             -0.9527  1.4335  -0.6646  0.4370 
## populationPregnant Women                    -0.1311  1.7407  -0.0753  0.9330 
## setICU                                       1.4652  0.6488   2.2583  0.0450 
## setMultiple                                  1.4671  1.4240   1.0303  0.2290 
## setOperating Room                            2.9901  1.6520   1.8100  0.0680 
## setOut of Hospital                           3.4702  1.0278   3.3763  0.0090 
## natureElective                               0.9412  0.9800   0.9604  0.3790 
## natureUrgent                                 1.3341  1.7637   0.7564  0.4550 
## inducerEtomidate                            -2.7294  1.3209  -2.0663  0.0590 
## inducerMidazolam                            -1.0232  1.0618  -0.9636  0.3890 
## inducerPropofol                             -0.7507  0.7649  -0.9815  0.3980 
## inducerPropofol/Ketamine                    -0.5165  1.2447  -0.4150  0.7050 
## inducerPropofol/Midazolam                   -0.5014  0.9648  -0.5197  0.6270 
## inducerPropofol/Thipental                   -0.8701  1.5733  -0.5530  0.5570 
## inducerThiopental                            0.4869  0.7786   0.6254  0.5870 
## opioidFentanyl                               0.7017  0.4231   1.6583  0.1580 
## opioidFentanyl/Sulfentanil                  -1.4360  1.0722  -1.3393  0.1850 
## opioidMorphine                               0.5823  1.4238   0.4090  0.6480 
## opioidRemifentanil                           0.2004  0.4807   0.4170  0.7130 
## opioidSulfentanil                            0.5505  0.6402   0.8598  0.4630 
## blockerAtracurium                            0.2983  0.6711   0.4444  0.6760 
## blockerCisatracurium                         1.0045  0.7001   1.4347  0.1980 
## blockerRocuronium                            0.0281  0.6149   0.0457  0.9610 
## blockerRocuronium/Vecuronium                -0.4557  1.2557  -0.3629  0.6900 
## blockerSuccinylcholine                      -0.3668  0.7219  -0.5081  0.6750 
## blockerSuccinylcholine/Rocuronium            3.4652  2.1734   1.5944  0.0840 
## blockerVecuronium                            1.0510  0.6544   1.6062  0.1630 
##                                              ci.lb    ci.ub 
## intrcpt                                    -7.1650   1.9381     
## experienceExperienced                      -0.2825   1.1367     
## experienceNon-experienced                  -0.0139   1.4456     
## techniqueRapid Sequence Induction          -3.0090  -0.0998   . 
## techniqueRegular                           -2.8994  -0.5487   * 
## techniqueRegular/Rapid Sequence Induction  -3.4628  -0.0054   . 
## populationGeneral                          -3.5856   1.7784     
## populationNeck Immobilization              -3.7060   1.6138     
## populationObese                            -3.8030   1.8975     
## populationPregnant Women                   -3.5921   3.3299     
## setICU                                      0.1752   2.7553   * 
## setMultiple                                -1.3642   4.2985     
## setOperating Room                          -0.2946   6.2748   . 
## setOut of Hospital                          1.4266   5.5137  ** 
## natureElective                             -1.0073   2.8896     
## natureUrgent                               -2.1727   4.8408     
## inducerEtomidate                           -5.3557  -0.1031   . 
## inducerMidazolam                           -3.1344   1.0880     
## inducerPropofol                            -2.2715   0.7701     
## inducerPropofol/Ketamine                   -2.9913   1.9583     
## inducerPropofol/Midazolam                  -2.4196   1.4168     
## inducerPropofol/Thipental                  -3.9981   2.2580     
## inducerThiopental                          -1.0611   2.0349     
## opioidFentanyl                             -0.1396   1.5430     
## opioidFentanyl/Sulfentanil                 -3.5679   0.6958     
## opioidMorphine                             -2.2486   3.4133     
## opioidRemifentanil                         -0.7554   1.1563     
## opioidSulfentanil                          -0.7225   1.8234     
## blockerAtracurium                          -1.0361   1.6327     
## blockerCisatracurium                       -0.3876   2.3965     
## blockerRocuronium                          -1.1945   1.2507     
## blockerRocuronium/Vecuronium               -2.9523   2.0409     
## blockerSuccinylcholine                     -1.8020   1.0685     
## blockerSuccinylcholine/Rocuronium          -0.8561   7.7865   . 
## blockerVecuronium                          -0.2500   2.3521     
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
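#The starred p-values above suggest permutation-based inference in metafor;
#a minimal sketch of how such values could be reproduced is shown below. The
#formula and the object name metareg_fia are assumptions, not the original call.
metareg_fia<-meta::metareg(mbin_fia_random,~experience+technique+population+set+nature+inducer+opioid+blocker)
set.seed(123) #permutation p-values are stochastic; fix the seed for reproducibility
metafor::permutest(metareg_fia,iter=1000)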
#Subgroup analysis for failed intubation by operator experience
meta::update.meta(mbin_fia_random, byvar = experience, tau.common = FALSE)
##                             RR             95%-CI %W(random)      experience
## Abdallah 2011          10.7822 [0.6123; 189.8605]        1.4 Non-experienced
## Abdelgalel 2018         0.1677 [0.0070;   4.0257]        1.2     Experienced
## Altun 2018              0.3496 [0.0607;   2.0132]        2.4     Experienced
## Ander 2017              0.0909 [0.0052;   1.5894]        1.4     Experienced
## Andersen 2011           0.2000 [0.0098;   4.0624]        1.3     Experienced
## Aoi 2010                1.0000 [0.0676;  14.7865]        1.5     Experienced
## Arima 2014              4.7345 [0.2326;  96.3642]        1.3     Experienced
## Aziz 2012               0.4718 [0.2387;   0.9327]        3.7 Non-experienced
## Bakshi - NTI 2015       1.7500 [0.4169;   7.3460]        2.8 Non-experienced
## Bakshi - NVL 2015       5.5965 [0.3319;  94.3792]        1.4 Non-experienced
## Bakshi 2019             0.3333 [0.0140;   7.9235]        1.2 Non-experienced
## Bhandari 2013           0.2000 [0.0099;   4.0371]        1.3                
## Chalkeidis 2010         3.2000 [0.3787;  27.0413]        2.0 Non-experienced
## Chandrashekaraiah 2017  0.3333 [0.0141;   7.8648]        1.2                
## Dhonneur 2008           0.3333 [0.0137;   8.0906]        1.2     Experienced
## Driver 2016             0.2306 [0.0502;   1.0587]        2.6 Non-experienced
## Erden 2010              3.0000 [0.1315;  68.4178]        1.2     Experienced
## Foulds 2016             0.0694 [0.0042;   1.1508]        1.4     Experienced
## Gao 2018                0.7593 [0.2757;   2.0909]        3.3     Experienced
## Hu 2017                 0.3167 [0.0131;   7.6805]        1.2     Experienced
## Ilyas 2014             11.0000 [0.6209; 194.8652]        1.4     Experienced
## Jungbauer 2009          0.1250 [0.0159;   0.9810]        2.0     Experienced
## Kill 2013               0.1429 [0.0077;   2.6497]        1.3 Non-experienced
## Kleine-Brueggeney 2017  0.3462 [0.2514;   0.4766]        4.0     Experienced
## Koh 2010                0.2500 [0.0300;   2.0828]        2.0     Experienced
## Lin 2012                0.6667 [0.1143;   3.8895]        2.4     Experienced
## Liu 2016                2.0000 [0.1846;  21.6662]        1.7 Non-experienced
## Liu 2019                0.0481 [0.0028;   0.8155]        1.4     Experienced
## Maharaj 2007            0.3333 [0.0144;   7.7130]        1.2     Experienced
## Maharaj 2008            0.1111 [0.0064;   1.9341]        1.4     Experienced
## Malik 2008              0.5000 [0.0877;   2.8510]        2.4     Experienced
## Malik1 2009             0.1250 [0.0147;   1.0604]        2.0     Experienced
## Mcelwain 2011           0.2672 [0.0252;   2.8318]        1.8     Experienced
## Myunghun-Kim 2017       3.0000 [0.1297;  69.4167]        1.2     Experienced
## Inangil 2018            0.2000 [0.0100;   4.0192]        1.3     Experienced
## Ndoko 2008              0.0769 [0.0044;   1.3317]        1.4                
## Parasa 2016             2.9048 [0.1230;  68.5796]        1.2 Non-experienced
## Peirovifar 2014         0.4000 [0.0876;   1.8256]        2.7 Non-experienced
## Pournajafian 2014       2.0000 [0.5276;   7.5816]        2.9 Non-experienced
## Ranieri 2012            0.1883 [0.0092;   3.8484]        1.3     Experienced
## Risse 2020              0.1826 [0.0091;   3.6594]        1.3     Experienced
## Ruetzeler 2020          0.3818 [0.0769;   1.8970]        2.5     Experienced
## Shah 2016               0.3333 [0.0141;   7.8648]        1.2     Experienced
## Shukla 2017             0.2000 [0.0099;   4.0371]        1.3     Experienced
## Sun 2005                0.3333 [0.0137;   8.0852]        1.2     Experienced
## Takenaka 2011           0.0883 [0.0051;   1.5379]        1.4     Experienced
## Taylor 2013             0.0270 [0.0017;   0.4348]        1.4                
## Walker 2009             3.0000 [0.1247;  72.1913]        1.2 Non-experienced
## Yoo 2018                0.2000 [0.0254;   1.5756]        2.0     Experienced
## Cavus 2011              0.0387 [0.0022;   0.6726]        1.4 Non-experienced
## Enomoto 2008            0.0457 [0.0027;   0.7646]        1.4                
## Zhao 2014               0.3649 [0.1828;   0.7281]        3.7 Non-experienced
## Cordovani 2019          0.5000 [0.1359;   1.8393]        2.9     Experienced
## Ferrando 2011           3.0000 [0.1271;  70.7833]        1.2 Non-experienced
## Serocki 2013            0.0569 [0.0032;   1.0243]        1.4                
## Serocki 2010            0.2500 [0.0478;   1.3075]        2.5                
## 
## Number of studies combined: k = 56
## 
##                          RR           95%-CI     t  p-value
## Random effects model 0.4100 [0.2881; 0.5833] -5.07 < 0.0001
## Prediction interval         [0.0500; 3.3648]               
## 
## Quantifying heterogeneity:
##  tau^2 = 1.0714 [0.0000; 1.2285]; tau = 1.0351 [0.0000; 1.1084];
##  I^2 = 17.5% [0.0%; 41.3%]; H = 1.10 [1.00; 1.31]
## 
## Quantifying residual heterogeneity:
##  I^2 = 2.6% [0.0%; 26.2%]; H = 1.01 [1.00; 1.16]
## 
## Test of heterogeneity:
##      Q d.f. p-value
##  66.65   55  0.1349
## 
## Results for subgroups (random effects model):
##                                k     RR           95%-CI  tau^2    tau     Q
## experience =                   7 0.1112 [0.0461; 0.2683] 0.2732 0.5226  3.17
## experience = Experienced      33 0.3501 [0.2413; 0.5080] 0.5920 0.7694 24.53
## experience = Non-experienced  16 0.8810 [0.4223; 1.8376] 1.2906 1.1360 26.69
##                                I^2
## experience =                  0.0%
## experience = Experienced      0.0%
## experience = Non-experienced 43.8%
## 
## Test for subgroup differences (random effects model):
##                      Q d.f. p-value
## Between groups   17.28    2  0.0002
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
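#As a sensitivity check, the subgroup analysis above could be repeated assuming
#a common tau^2 across subgroups; a minimal sketch, not part of the original analysis.
meta::update.meta(mbin_fia_random, byvar = experience, tau.common = TRUE)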
#Subgroup analysis for failed intubation by predicted intubation difficulty
meta::update.meta(mbin_fia_random, byvar = predicted, tau.common = FALSE)
##                             RR             95%-CI %W(random) predicted
## Abdallah 2011          10.7822 [0.6123; 189.8605]        1.4 Difficult
## Abdelgalel 2018         0.1677 [0.0070;   4.0257]        1.2 Difficult
## Altun 2018              0.3496 [0.0607;   2.0132]        2.4      Easy
## Ander 2017              0.0909 [0.0052;   1.5894]        1.4      Easy
## Andersen 2011           0.2000 [0.0098;   4.0624]        1.3 Difficult
## Aoi 2010                1.0000 [0.0676;  14.7865]        1.5 Difficult
## Arima 2014              4.7345 [0.2326;  96.3642]        1.3 Difficult
## Aziz 2012               0.4718 [0.2387;   0.9327]        3.7 Difficult
## Bakshi - NTI 2015       1.7500 [0.4169;   7.3460]        2.8      Easy
## Bakshi - NVL 2015       5.5965 [0.3319;  94.3792]        1.4      Easy
## Bakshi 2019             0.3333 [0.0140;   7.9235]        1.2      Easy
## Bhandari 2013           0.2000 [0.0099;   4.0371]        1.3 Difficult
## Chalkeidis 2010         3.2000 [0.3787;  27.0413]        2.0      Easy
## Chandrashekaraiah 2017  0.3333 [0.0141;   7.8648]        1.2      Easy
## Dhonneur 2008           0.3333 [0.0137;   8.0906]        1.2          
## Driver 2016             0.2306 [0.0502;   1.0587]        2.6          
## Erden 2010              3.0000 [0.1315;  68.4178]        1.2      Easy
## Foulds 2016             0.0694 [0.0042;   1.1508]        1.4 Difficult
## Gao 2018                0.7593 [0.2757;   2.0909]        3.3          
## Hu 2017                 0.3167 [0.0131;   7.6805]        1.2      Easy
## Ilyas 2014             11.0000 [0.6209; 194.8652]        1.4 Difficult
## Jungbauer 2009          0.1250 [0.0159;   0.9810]        2.0 Difficult
## Kill 2013               0.1429 [0.0077;   2.6497]        1.3 Difficult
## Kleine-Brueggeney 2017  0.3462 [0.2514;   0.4766]        4.0 Difficult
## Koh 2010                0.2500 [0.0300;   2.0828]        2.0 Difficult
## Lin 2012                0.6667 [0.1143;   3.8895]        2.4      Easy
## Liu 2016                2.0000 [0.1846;  21.6662]        1.7      Easy
## Liu 2019                0.0481 [0.0028;   0.8155]        1.4      Easy
## Maharaj 2007            0.3333 [0.0144;   7.7130]        1.2 Difficult
## Maharaj 2008            0.1111 [0.0064;   1.9341]        1.4 Difficult
## Malik 2008              0.5000 [0.0877;   2.8510]        2.4 Difficult
## Malik1 2009             0.1250 [0.0147;   1.0604]        2.0 Difficult
## Mcelwain 2011           0.2672 [0.0252;   2.8318]        1.8 Difficult
## Myunghun-Kim 2017       3.0000 [0.1297;  69.4167]        1.2      Easy
## Inangil 2018            0.2000 [0.0100;   4.0192]        1.3      Easy
## Ndoko 2008              0.0769 [0.0044;   1.3317]        1.4 Difficult
## Parasa 2016             2.9048 [0.1230;  68.5796]        1.2      Easy
## Peirovifar 2014         0.4000 [0.0876;   1.8256]        2.7      Easy
## Pournajafian 2014       2.0000 [0.5276;   7.5816]        2.9      Easy
## Ranieri 2012            0.1883 [0.0092;   3.8484]        1.3 Difficult
## Risse 2020              0.1826 [0.0091;   3.6594]        1.3      Easy
## Ruetzeler 2020          0.3818 [0.0769;   1.8970]        2.5 Difficult
## Shah 2016               0.3333 [0.0141;   7.8648]        1.2      Easy
## Shukla 2017             0.2000 [0.0099;   4.0371]        1.3 Difficult
## Sun 2005                0.3333 [0.0137;   8.0852]        1.2      Easy
## Takenaka 2011           0.0883 [0.0051;   1.5379]        1.4      Easy
## Taylor 2013             0.0270 [0.0017;   0.4348]        1.4 Difficult
## Walker 2009             3.0000 [0.1247;  72.1913]        1.2      Easy
## Yoo 2018                0.2000 [0.0254;   1.5756]        2.0      Easy
## Cavus 2011              0.0387 [0.0022;   0.6726]        1.4      Easy
## Enomoto 2008            0.0457 [0.0027;   0.7646]        1.4 Difficult
## Zhao 2014               0.3649 [0.1828;   0.7281]        3.7      Easy
## Cordovani 2019          0.5000 [0.1359;   1.8393]        2.9 Difficult
## Ferrando 2011           3.0000 [0.1271;  70.7833]        1.2      Easy
## Serocki 2013            0.0569 [0.0032;   1.0243]        1.4 Difficult
## Serocki 2010            0.2500 [0.0478;   1.3075]        2.5 Difficult
## 
## Number of studies combined: k = 56
## 
##                          RR           95%-CI     t  p-value
## Random effects model 0.4100 [0.2881; 0.5833] -5.07 < 0.0001
## Prediction interval         [0.0500; 3.3648]               
## 
## Quantifying heterogeneity:
##  tau^2 = 1.0714 [0.0000; 1.2285]; tau = 1.0351 [0.0000; 1.1084];
##  I^2 = 17.5% [0.0%; 41.3%]; H = 1.10 [1.00; 1.31]
## 
## Quantifying residual heterogeneity:
##  I^2 = 14.2% [0.0%; 39.3%]; H = 1.08 [1.00; 1.28]
## 
## Test of heterogeneity:
##      Q d.f. p-value
##  66.65   55  0.1349
## 
## Results for subgroups (random effects model):
##                         k     RR           95%-CI  tau^2    tau     Q   I^2
## predicted =             3 0.4822 [0.0889; 2.6164] 0.1342 0.3664  1.70  0.0%
## predicted = Difficult  27 0.2930 [0.1752; 0.4901] 1.0743 1.0365 28.70  9.4%
## predicted = Easy       26 0.5907 [0.3424; 1.0191] 1.0529 1.0261 31.40 20.4%
## 
## Test for subgroup differences (random effects model):
##                     Q d.f. p-value
## Between groups   3.85    2  0.1456
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
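#Three comparisons enter the subgroup analysis above with a blank 'predicted'
#label; a sketch of one way to restrict the analysis to labelled studies,
#assuming blank values encode missing data (fia_predicted is a hypothetical name):
fia_predicted<-dplyr::filter(fia_analysis,predicted!="")
meta::metabin(fint.e1,fint.t1,fint.e2,fint.t2,data = fia_predicted,studlab = paste(author, year),comb.fixed = FALSE,comb.random = TRUE,method.tau = "SJ",hakn = TRUE,incr = 0.5,sm = "RR",byvar = predicted)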
#Funnel Plot for failed intubation
meta::funnel(mbin_fia_random,ref.triangle=TRUE,contour.levels=c(0.9,0.95,0.99),col.contour=c("darkblue","blue","lightblue"))
meta::metabias(mbin_fia_random, method.bias = "linreg")
## 
##  Linear regression test of funnel plot asymmetry
## 
## data:  mbin_fia_random
## t = 0.16145, df = 54, p-value = 0.8723
## alternative hypothesis: asymmetry in funnel plot
## sample estimates:
##        bias     se.bias   intercept 
##  0.03723848  0.23065423 -0.93259323
dmetar::eggers.test(mbin_fia_random)
##              Intercept ConfidenceInterval     t       p
## Egger's test     0.037       -0.355-0.429 0.161 0.87234
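#Neither regression test suggests funnel plot asymmetry; as an optional
#complement, a trim-and-fill sensitivity analysis could be run (sketch only):
meta::trimfill(mbin_fia_random)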

Failed First Intubation Attempts

first<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Pairwise Failed First Intubation Attempt.csv")
length(first$ffirst.e1)
## [1] 110
#Number of comparisons with zero failed first intubation attempts in both arms
first_zeros<-dplyr::filter(first,first$ffirst.e1==0 & first$ffirst.e2==0)
length(first_zeros$ffirst.e1)
## [1] 12
#Table for Meta-analysis of failed first intubation attempts
first_analysis<-dplyr::filter(first,first$ffirst.e1>0 | first$ffirst.e2>0)

#Number of comparisons and patients meta-analyzed for failed first intubation attempts
length(first_analysis$ffirst.e1)
## [1] 98
sum(na.omit(first_analysis$ffirst.t1),na.omit(first_analysis$ffirst.t2))
## [1] 11287
#Meta-analysis for failed first intubation attempts
mbin_first_random<-meta::metabin(ffirst.e1,ffirst.t1,ffirst.e2,ffirst.t2,data = first_analysis,studlab = paste(author, year),comb.fixed = FALSE,comb.random = TRUE,method.tau = "SJ",hakn = TRUE,prediction = TRUE,incr = 0.5,sm = "RR")
mbin_first_random
##                             RR             95%-CI %W(random)
## Abdallah 2011           1.7150 [0.5357;   5.4904]        1.3
## Abdallah 2019           0.5000 [0.0475;   5.2653]        0.6
## Abdelgalel 2018         0.2273 [0.0847;   0.6096]        1.4
## Abdelgawad 2015         0.3333 [0.0140;   7.9424]        0.4
## Agrawal 2020            0.3333 [0.0140;   7.9424]        0.4
## Akbar 2015              0.1667 [0.0209;   1.3291]        0.8
## Al - Ghamdi 2016        1.4323 [0.6780;   3.0256]        1.6
## Ali 2012                0.3333 [0.1021;   1.0883]        1.3
## Ali 2017                0.3333 [0.0367;   3.0260]        0.7
## Altun 2018              0.8333 [0.3261;   2.1296]        1.4
## Ander 2017              0.0909 [0.0052;   1.5894]        0.5
## Andersen 2011           0.2500 [0.0289;   2.1590]        0.7
## Aoi 2010                1.0000 [0.2946;   3.3948]        1.2
## Aqil 2016               0.8571 [0.3158;   2.3264]        1.4
## Aqil 2017               0.4000 [0.1648;   0.9710]        1.5
## Arima 2014              2.1841 [1.2837;   3.7161]        1.7
## Arslan 2017             3.5217 [0.1864;  66.5522]        0.5
## Bakshi 2019             0.9730 [0.0632;  14.9718]        0.5
## Barak 2007              0.4500 [0.1469;   1.3788]        1.3
## Bashir 2020             0.2857 [0.0632;   1.2922]        1.0
## Colak 2019              0.2500 [0.0291;   2.1505]        0.7
## El - Tahan 2018         2.0594 [0.7770;   5.4582]        1.4
## Huang 2020              2.7119 [1.2771;   5.7587]        1.5
## Wasem 2013              0.5000 [0.0989;   2.5270]        1.0
## Bhalla 2018             2.0000 [0.4291;   9.3210]        1.0
## Bharti 2014             0.3167 [0.0360;   2.7855]        0.7
## Bhat 2015               0.4286 [0.1174;   1.5639]        1.2
## Bilehjani 2009          3.4833 [1.0524;  11.5293]        1.2
## Blajic 2019             0.6611 [0.1529;   2.8581]        1.1
## Cakir 2020              0.3333 [0.0141;   7.8748]        0.4
## Carlino 2009            0.0667 [0.0042;   1.0690]        0.5
## Dey 2020                0.3684 [0.2264;   0.5996]        1.7
## Di Marco 2011           0.6364 [0.2668;   1.5179]        1.5
## Driver 2016             0.5676 [0.2462;   1.3087]        1.5
## Erden 2010              3.0000 [0.1315;  68.4178]        0.4
## Erturk 2015             0.4286 [0.1192;   1.5407]        1.2
## Gao 2018                1.0528 [0.6679;   1.6597]        1.7
## Goksu 2016              0.6129 [0.3819;   0.9835]        1.7
## Griesdale 2012          0.9231 [0.5705;   1.4934]        1.7
## Gunes 2020              0.2857 [0.0610;   1.3382]        1.0
## Gupta 2020              0.5000 [0.0472;   5.2961]        0.6
## Hirabayashi 2009        0.1385 [0.0754;   0.2544]        1.6
## Hirabayashi 2010        0.2609 [0.1110;   0.6132]        1.5
## Hosalli 2017            0.4286 [0.1223;   1.5022]        1.2
## Hsu 2012                0.1111 [0.0062;   1.9760]        0.5
## Hu 2017                 0.1920 [0.0093;   3.9489]        0.5
## Kaur 2020               0.1000 [0.0121;   0.8275]        0.7
## Kido 2015               0.1111 [0.0152;   0.8130]        0.8
## Kim 2013                0.1160 [0.0066;   2.0345]        0.5
## Kleine-Brueggeney 2017  0.4831 [0.3851;   0.6060]        1.8
## Koh 2010                0.0667 [0.0095;   0.4671]        0.8
## Kreutziger 2019         1.2289 [0.8567;   1.7628]        1.8
## Kunaz 2016              1.3333 [0.3144;   5.6542]        1.1
## Laosuwan 2015           2.0000 [0.2107;  18.9807]        0.7
## Lascarrou 2017          1.0902 [0.7957;   1.4937]        1.8
## Lim 2005                0.5000 [0.0989;   2.5270]        1.0
## Lin 2012                0.4000 [0.1865;   0.8577]        1.5
## Liu 2014                0.1000 [0.0134;   0.7451]        0.8
## Liu 2016                1.6667 [0.6324;   4.3923]        1.4
## Liu 2019                0.3932 [0.1684;   0.9184]        1.5
## Macke 2020              0.2500 [0.0876;   0.7133]        1.3
## Maharaj 2006            0.3333 [0.0141;   7.8648]        0.4
## Maharaj 2007            0.3333 [0.0144;   7.7130]        0.4
## Maharaj 2008            0.1429 [0.0193;   1.0572]        0.8
## Malik 2008              0.9167 [0.3153;   2.6649]        1.3
## Malik1 2009             0.6250 [0.2819;   1.3856]        1.5
## Malik2 2009             2.0000 [0.1914;  20.8980]        0.6
## Mcelwain 2011           0.3563 [0.1087;   1.1685]        1.3
## Myunghun-Kim 2017       3.0000 [0.1297;  69.4167]        0.4
## Kulkarni 2013           3.0000 [0.1271;  70.7833]        0.4
## Inangil 2018            0.2000 [0.0100;   4.0192]        0.5
## Ing 2017                2.7273 [0.2815;  26.4214]        0.7
## Ndoko 2008              0.1111 [0.0061;   2.0134]        0.5
## Nishiyama 2011          3.0542 [1.1715;   7.9626]        1.4
## Parasa 2016            13.0000 [0.7654; 220.8017]        0.5
## Peirovifar 2014         0.5000 [0.1789;   1.3975]        1.4
## Ranieri 2012            0.0554 [0.0033;   0.9403]        0.5
## Reena 2019              0.3077 [0.1077;   0.8792]        1.3
## Risse 2020              1.5196 [0.3955;   5.8387]        1.1
## Ruetzeler 2020          0.6818 [0.2282;   2.0370]        1.3
## Sargin 2016             0.0667 [0.0039;   1.1365]        0.5
## Shah 2016               0.2857 [0.1062;   0.7684]        1.4
## Shukla 2017             0.1250 [0.0164;   0.9538]        0.8
## Sulser 2016             2.9597 [0.1225;  71.4820]        0.4
## Sun 2005                2.0000 [0.5144;   7.7761]        1.1
## Takenaka 2011           0.0883 [0.0051;   1.5379]        0.5
## Taylor 2013             0.0270 [0.0017;   0.4348]        0.5
## Teoh 2010               3.5000 [0.8354;  14.6640]        1.1
## Tsan 2020               0.3333 [0.0355;   3.1262]        0.7
## Varsha 2019             0.2000 [0.0100;   4.0192]        0.5
## Yoo 2018                0.2000 [0.0254;   1.5756]        0.8
## Yumul 2016              0.5597 [0.2565;   1.2215]        1.5
## El-Tahan 2017           4.6774 [0.2446;  89.4307]        0.5
## Lee 2012                3.0000 [1.1854;   7.5925]        1.4
## Zhao 2014               0.4223 [0.2800;   0.6370]        1.7
## Ferrando 2011           0.1667 [0.0213;   1.3020]        0.8
## Serocki 2013            0.7111 [0.2449;   2.0646]        1.3
## Serocki 2010            0.7000 [0.2370;   2.0676]        1.3
## 
## Number of studies combined: k = 98
## 
##                          RR           95%-CI     t  p-value
## Random effects model 0.5810 [0.4721; 0.7151] -5.19 < 0.0001
## Prediction interval         [0.1006; 3.3549]               
## 
## Quantifying heterogeneity:
##  tau^2 = 0.7693 [0.2316; 0.8332]; tau = 0.8771 [0.4812; 0.9128];
##  I^2 = 61.7% [52.3%; 69.2%]; H = 1.62 [1.45; 1.80]
## 
## Test of heterogeneity:
##       Q d.f.  p-value
##  253.11   97 < 0.0001
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
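#The prediction interval above can be hand-checked from the pooled log-RR and
#tau reported in the output (meta bases the prediction interval on a
#t-distribution with k-2 degrees of freedom):
logRR<-log(0.5810); tau<-0.8771; k<-98
seTE<-(log(0.7151)-log(0.4721))/(2*qt(0.975,df = k-1)) #back out the SE from the Hartung-Knapp CI
exp(logRR+c(-1,1)*qt(0.975,df = k-2)*sqrt(seTE^2+tau^2)) #approx. [0.10; 3.35]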
#Estimated probability of a failed first intubation attempt with the Macintosh laryngoscope
meta::metaprop(event = ffirst.e2,n = ffirst.t2 ,studlab = paste(author,year),data = first,method = "GLMM",sm = "PLOGIT",comb.fixed = FALSE,comb.random = TRUE,hakn = TRUE)
##                          proportion           95%-CI
## Abdallah 2011                0.0816 [0.0227; 0.1960]
## Abdallah 2019                0.0571 [0.0070; 0.1916]
## Abdelgalel 2018              0.2750 [0.1460; 0.4389]
## Abdelgawad 2015              0.0250 [0.0006; 0.1316]
## Agrawal 2020                 0.0250 [0.0006; 0.1316]
## Ahmad 2016                   0.0000 [0.0000; 0.0377]
## Ahmad 2015                   0.0000 [0.0000; 0.1372]
## Akbar 2015                   0.1333 [0.0505; 0.2679]
## Al - Ghamdi 2016             0.2727 [0.1073; 0.5022]
## Ali 2012                     0.3600 [0.1797; 0.5748]
## Ali 2017                     0.1000 [0.0211; 0.2653]
## Altun 2018                   0.1500 [0.0571; 0.2984]
## Ander 2017                   0.1282 [0.0430; 0.2743]
## Andersen 2011                0.0800 [0.0222; 0.1923]
## Aoi 2010                     0.2222 [0.0641; 0.4764]
## Aqil 2016                    0.1750 [0.0734; 0.3278]
## Aqil 2017                    0.2143 [0.1252; 0.3287]
## Arici 2014                   0.0000 [0.0000; 0.0881]
## Arima 2014                   0.2453 [0.1376; 0.3828]
## Arslan 2017                  0.0000 [0.0000; 0.0881]
## Bakshi 2019                  0.0278 [0.0007; 0.1453]
## Barak 2007                   0.1111 [0.0546; 0.1949]
## Barman 2017                  0.0000 [0.0000; 0.1000]
## Bashir 2020                  0.1750 [0.0734; 0.3278]
## Colak 2019                   0.0889 [0.0248; 0.2122]
## El - Tahan 2018              0.1250 [0.0351; 0.2899]
## Huang 2020                   0.2000 [0.0771; 0.3857]
## Wasem 2013                   0.1333 [0.0376; 0.3072]
## Yao 2015                     0.0000 [0.0000; 0.0740]
## Bhalla 2018                  0.1333 [0.0166; 0.4046]
## Bharti 2014                  0.1579 [0.0338; 0.3958]
## Bhat 2015                    0.1400 [0.0582; 0.2674]
## Bilehjani 2009               0.0789 [0.0166; 0.2138]
## Blajic 2019                  0.0508 [0.0106; 0.1415]
## Cakir 2020                   0.0323 [0.0008; 0.1670]
## Carlino 2009                 0.4667 [0.2127; 0.7341]
## Dey 2020                     0.4273 [0.3334; 0.5252]
## Di Marco 2011                0.2037 [0.1063; 0.3353]
## Driver 2016                  0.1368 [0.0749; 0.2226]
## Erden 2010                   0.0000 [0.0000; 0.2059]
## Erturk 2015                  0.1750 [0.0734; 0.3278]
## Gao 2018                     0.3049 [0.2080; 0.4164]
## Goksu 2016                   0.4133 [0.3008; 0.5330]
## Griesdale 2012               0.6500 [0.4078; 0.8461]
## Gunes 2020                   0.0778 [0.0318; 0.1537]
## Gupta 2020                   0.0500 [0.0061; 0.1692]
## Hirabayashi 2009             0.3008 [0.2453; 0.3610]
## Hirabayashi 2010             0.2300 [0.1517; 0.3249]
## Hosalli 2017                 0.2333 [0.0993; 0.4228]
## Hsu 2012                     0.1333 [0.0376; 0.3072]
## Hu 2017                      0.0208 [0.0025; 0.0732]
## Kaur 2020                    0.1250 [0.0419; 0.2680]
## Kido 2015                    0.3600 [0.1797; 0.5748]
## Kim 2013                     0.1739 [0.0495; 0.3878]
## Kleine-Brueggeney 2017       0.5750 [0.4815; 0.6647]
## Koh 2010                     0.6000 [0.3867; 0.7887]
## Kreutziger 2019              0.1700 [0.1254; 0.2228]
## Küçükosman 2020              0.0000 [0.0000; 0.1157]
## Kunaz 2016                   0.0600 [0.0125; 0.1655]
## Laosuwan 2015                0.0909 [0.0023; 0.4128]
## Lascarrou 2017               0.2857 [0.2213; 0.3572]
## Lim 2005                     0.1333 [0.0376; 0.3072]
## Lin 2012                     0.2353 [0.1500; 0.3397]
## Liu 2014                     0.2500 [0.1269; 0.4120]
## Liu 2016                     0.0667 [0.0249; 0.1395]
## Liu 2019                     0.0994 [0.0600; 0.1526]
## Macke 2020                   0.2105 [0.1254; 0.3192]
## Maharaj 2006                 0.0333 [0.0008; 0.1722]
## Maharaj 2007                 0.0500 [0.0013; 0.2487]
## Maharaj 2008                 0.3500 [0.1539; 0.5922]
## Malik 2008                   0.1333 [0.0376; 0.3072]
## Malik1 2009                  0.3200 [0.1495; 0.5350]
## Malik2 2009                  0.0333 [0.0008; 0.1722]
## Mcelwain 2011                0.1935 [0.0745; 0.3747]
## Myunghun-Kim 2017            0.0000 [0.0000; 0.1684]
## Kulkarni 2013                0.0000 [0.0000; 0.1157]
## Inangil 2018                 0.0571 [0.0070; 0.1916]
## Ing 2017                     0.0667 [0.0017; 0.3195]
## Jafra 2018                   0.0000 [0.0000; 0.0362]
## Ndoko 2008                   0.0755 [0.0209; 0.1821]
## Nishiyama 2011               0.1143 [0.0320; 0.2674]
## Parasa 2016                  0.0000 [0.0000; 0.1157]
## Pazur 2016                   0.0000 [0.0000; 0.1323]
## Peirovifar 2014              0.4000 [0.1912; 0.6395]
## Pournajafian 2014            0.0000 [0.0000; 0.0725]
## Ranieri 2012                 0.1250 [0.0555; 0.2315]
## Reena 2019                   0.2600 [0.1463; 0.4034]
## Risse 2020                   0.0968 [0.0204; 0.2575]
## Ruetzeler 2020               0.1111 [0.0459; 0.2156]
## Sargin 2016                  0.1400 [0.0582; 0.2674]
## Shah 2016                    0.4667 [0.2834; 0.6567]
## Shukla 2017                  0.2000 [0.0905; 0.3565]
## Sulser 2016                  0.0000 [0.0000; 0.0493]
## Sun 2005                     0.0300 [0.0062; 0.0852]
## Takenaka 2011                0.1471 [0.0495; 0.3106]
## Taylor 2013                  0.4091 [0.2634; 0.5675]
## Teoh 2010                    0.0200 [0.0024; 0.0704]
## Tsan 2020                    0.0435 [0.0091; 0.1218]
## Varsha 2019                  0.0571 [0.0070; 0.1916]
## Vijayakumar 2016             0.0000 [0.0000; 0.0787]
## Yoo 2018                     0.2273 [0.0782; 0.4537]
## Yumul 2016                   0.2581 [0.1186; 0.4461]
## El-Tahan 2017                0.0000 [0.0000; 0.2316]
## Lee 2012                     0.1600 [0.0454; 0.3608]
## Paik 2020                    0.0000 [0.0000; 0.3085]
## Zhao 2014                    0.6400 [0.5209; 0.7477]
## Ferrando 2011                0.2000 [0.0771; 0.3857]
## Serocki 2013                 0.1562 [0.0528; 0.3279]
## Serocki 2010                 0.1250 [0.0419; 0.2680]
## Arora 2013                   0.0000 [0.0000; 0.0660]
## 
## Number of studies combined: k = 110
## 
##                      proportion           95%-CI
## Random effects model     0.1042 [0.0817; 0.1320]
## 
## Quantifying heterogeneity:
##  tau^2 = 1.5543; tau = 1.2467; I^2 = 90.1%; H = 3.18
## 
## Test of heterogeneity:
##       Q d.f.  p-value             Test
##  486.12  109 < 0.0001        Wald-type
##  919.51  109 < 0.0001 Likelihood-Ratio
## 
## Details on meta-analytical method:
## - Random intercept logistic regression model
## - Maximum-likelihood estimator for tau^2
## - Hartung-Knapp adjustment for random effects model
## - Logit transformation
## - Clopper-Pearson confidence interval for individual studies
## - Continuity correction of 0.5 in studies with zero cell frequencies
##   (only used to calculate individual study results)
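#Combining the pooled RR for failed first attempts (0.5810) with the pooled
#Macintosh baseline risk (0.1042) gives a rough absolute effect; a
#back-of-the-envelope sketch, not a formal analysis:
baseline<-0.1042 #pooled Macintosh failed first attempt risk (GLMM, above)
risk_vl<-baseline*0.5810 #approx. 0.061 with videolaryngoscopes
round(1000*(baseline-risk_vl)) #approx. 44 fewer failed first attempts per 1000 patients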
#Forest plot for failed first intubation attempt
meta::forest(mbin_first_random,sortvar=TE,lab.e="Videolaryngoscopes",lab.c="Macintosh",col.study="black",col.square="black",col.diamond="blue")

#Detecting Outliers for failed first intubation attempts
dmetar::find.outliers(mbin_first_random)
## Identified outliers (random-effects model) 
## ------------------------------------------ 
## "Arima 2014", "El - Tahan 2018", "Huang 2020", "Bilehjani 2009", "Hirabayashi 2009", "Koh 2010", "Kreutziger 2019", "Lascarrou 2017", "Nishiyama 2011", "Parasa 2016", "Taylor 2013", "Teoh 2010", "Lee 2012" 
##  
## Results with outliers removed 
## ----------------------------- 
##                             RR             95%-CI %W(random) exclude
## Abdallah 2011           1.7150 [0.5357;   5.4904]        1.5        
## Abdallah 2019           0.5000 [0.0475;   5.2653]        0.7        
## Abdelgalel 2018         0.2273 [0.0847;   0.6096]        1.8        
## Abdelgawad 2015         0.3333 [0.0140;   7.9424]        0.4        
## Agrawal 2020            0.3333 [0.0140;   7.9424]        0.4        
## Akbar 2015              0.1667 [0.0209;   1.3291]        0.8        
## Al - Ghamdi 2016        1.4323 [0.6780;   3.0256]        2.1        
## Ali 2012                0.3333 [0.1021;   1.0883]        1.5        
## Ali 2017                0.3333 [0.0367;   3.0260]        0.7        
## Altun 2018              0.8333 [0.3261;   2.1296]        1.8        
## Ander 2017              0.0909 [0.0052;   1.5894]        0.5        
## Andersen 2011           0.2500 [0.0289;   2.1590]        0.8        
## Aoi 2010                1.0000 [0.2946;   3.3948]        1.5        
## Aqil 2016               0.8571 [0.3158;   2.3264]        1.7        
## Aqil 2017               0.4000 [0.1648;   0.9710]        1.9        
## Arima 2014              2.1841 [1.2837;   3.7161]        0.0       *
## Arslan 2017             3.5217 [0.1864;  66.5522]        0.5        
## Bakshi 2019             0.9730 [0.0632;  14.9718]        0.5        
## Barak 2007              0.4500 [0.1469;   1.3788]        1.6        
## Bashir 2020             0.2857 [0.0632;   1.2922]        1.2        
## Colak 2019              0.2500 [0.0291;   2.1505]        0.8        
## El - Tahan 2018         2.0594 [0.7770;   5.4582]        0.0       *
## Huang 2020              2.7119 [1.2771;   5.7587]        0.0       *
## Wasem 2013              0.5000 [0.0989;   2.5270]        1.1        
## Bhalla 2018             2.0000 [0.4291;   9.3210]        1.2        
## Bharti 2014             0.3167 [0.0360;   2.7855]        0.7        
## Bhat 2015               0.4286 [0.1174;   1.5639]        1.4        
## Bilehjani 2009          3.4833 [1.0524;  11.5293]        0.0       *
## Blajic 2019             0.6611 [0.1529;   2.8581]        1.2        
## Cakir 2020              0.3333 [0.0141;   7.8748]        0.4        
## Carlino 2009            0.0667 [0.0042;   1.0690]        0.5        
## Dey 2020                0.3684 [0.2264;   0.5996]        2.4        
## Di Marco 2011           0.6364 [0.2668;   1.5179]        1.9        
## Driver 2016             0.5676 [0.2462;   1.3087]        2.0        
## Erden 2010              3.0000 [0.1315;  68.4178]        0.4        
## Erturk 2015             0.4286 [0.1192;   1.5407]        1.4        
## Gao 2018                1.0528 [0.6679;   1.6597]        2.4        
## Goksu 2016              0.6129 [0.3819;   0.9835]        2.4        
## Griesdale 2012          0.9231 [0.5705;   1.4934]        2.4        
## Gunes 2020              0.2857 [0.0610;   1.3382]        1.2        
## Gupta 2020              0.5000 [0.0472;   5.2961]        0.7        
## Hirabayashi 2009        0.1385 [0.0754;   0.2544]        0.0       *
## Hirabayashi 2010        0.2609 [0.1110;   0.6132]        1.9        
## Hosalli 2017            0.4286 [0.1223;   1.5022]        1.4        
## Hsu 2012                0.1111 [0.0062;   1.9760]        0.5        
## Hu 2017                 0.1920 [0.0093;   3.9489]        0.4        
## Kaur 2020               0.1000 [0.0121;   0.8275]        0.8        
## Kido 2015               0.1111 [0.0152;   0.8130]        0.8        
## Kim 2013                0.1160 [0.0066;   2.0345]        0.5        
## Kleine-Brueggeney 2017  0.4831 [0.3851;   0.6060]        2.6        
## Koh 2010                0.0667 [0.0095;   0.4671]        0.0       *
## Kreutziger 2019         1.2289 [0.8567;   1.7628]        0.0       *
## Kunaz 2016              1.3333 [0.3144;   5.6542]        1.3        
## Laosuwan 2015           2.0000 [0.2107;  18.9807]        0.7        
## Lascarrou 2017          1.0902 [0.7957;   1.4937]        0.0       *
## Lim 2005                0.5000 [0.0989;   2.5270]        1.1        
## Lin 2012                0.4000 [0.1865;   0.8577]        2.1        
## Liu 2014                0.1000 [0.0134;   0.7451]        0.8        
## Liu 2016                1.6667 [0.6324;   4.3923]        1.8        
## Liu 2019                0.3932 [0.1684;   0.9184]        1.9        
## Macke 2020              0.2500 [0.0876;   0.7133]        1.7        
## Maharaj 2006            0.3333 [0.0141;   7.8648]        0.4        
## Maharaj 2007            0.3333 [0.0144;   7.7130]        0.4        
## Maharaj 2008            0.1429 [0.0193;   1.0572]        0.8        
## Malik 2008              0.9167 [0.3153;   2.6649]        1.7        
## Malik1 2009             0.6250 [0.2819;   1.3856]        2.0        
## Malik2 2009             2.0000 [0.1914;  20.8980]        0.7        
## Mcelwain 2011           0.3563 [0.1087;   1.1685]        1.5        
## Myunghun-Kim 2017       3.0000 [0.1297;  69.4167]        0.4        
## Kulkarni 2013           3.0000 [0.1271;  70.7833]        0.4        
## Inangil 2018            0.2000 [0.0100;   4.0192]        0.4        
## Ing 2017                2.7273 [0.2815;  26.4214]        0.7        
## Ndoko 2008              0.1111 [0.0061;   2.0134]        0.5        
## Nishiyama 2011          3.0542 [1.1715;   7.9626]        0.0       *
## Parasa 2016            13.0000 [0.7654; 220.8017]        0.0       *
## Peirovifar 2014         0.5000 [0.1789;   1.3975]        1.7        
## Ranieri 2012            0.0554 [0.0033;   0.9403]        0.5        
## Reena 2019              0.3077 [0.1077;   0.8792]        1.7        
## Risse 2020              1.5196 [0.3955;   5.8387]        1.3        
## Ruetzeler 2020          0.6818 [0.2282;   2.0370]        1.6        
## Sargin 2016             0.0667 [0.0039;   1.1365]        0.5        
## Shah 2016               0.2857 [0.1062;   0.7684]        1.8        
## Shukla 2017             0.1250 [0.0164;   0.9538]        0.8        
## Sulser 2016             2.9597 [0.1225;  71.4820]        0.4        
## Sun 2005                2.0000 [0.5144;   7.7761]        1.3        
## Takenaka 2011           0.0883 [0.0051;   1.5379]        0.5        
## Taylor 2013             0.0270 [0.0017;   0.4348]        0.0       *
## Teoh 2010               3.5000 [0.8354;  14.6640]        0.0       *
## Tsan 2020               0.3333 [0.0355;   3.1262]        0.7        
## Varsha 2019             0.2000 [0.0100;   4.0192]        0.4        
## Yoo 2018                0.2000 [0.0254;   1.5756]        0.8        
## Yumul 2016              0.5597 [0.2565;   1.2215]        2.0        
## El-Tahan 2017           4.6774 [0.2446;  89.4307]        0.5        
## Lee 2012                3.0000 [1.1854;   7.5925]        0.0       *
## Zhao 2014               0.4223 [0.2800;   0.6370]        2.5        
## Ferrando 2011           0.1667 [0.0213;   1.3020]        0.8        
## Serocki 2013            0.7111 [0.2449;   2.0646]        1.7        
## Serocki 2010            0.7000 [0.2370;   2.0676]        1.6        
## 
## Number of studies combined: k = 85
## 
##                          RR           95%-CI     t  p-value
## Random effects model 0.5041 [0.4220; 0.6021] -7.67 < 0.0001
## Prediction interval         [0.1285; 1.9781]               
## 
## Quantifying heterogeneity:
##  tau^2 = 0.4645 [0.0000; 0.3173]; tau = 0.6815 [0.0000; 0.5633];
##  I^2 = 21.6% [0.0%; 40.6%]; H = 1.13 [1.00; 1.30]
## 
## Test of heterogeneity:
##       Q d.f. p-value
##  107.17   84  0.0449
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
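#A forest plot is a natural companion to the tabular output above; a minimal
#sketch using meta's forest method (a suggestion, output omitted; object name
#taken from the influence analysis call below)
meta::forest(mbin_first_random, comb.fixed = FALSE, comb.random = TRUE, sortvar = TE)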
#Influence Analysis for failed first intubation attempts
inf_analysis_first<-dmetar::InfluenceAnalysis(mbin_first_random,random = TRUE)
## [===========================================================================] DONE
plot(inf_analysis_first,"baujat")
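#The same InfluenceAnalysis object supports further diagnostic plots; a
#sketch of the other plot types documented for dmetar (output omitted)
plot(inf_analysis_first,"influence")
plot(inf_analysis_first,"es")
plot(inf_analysis_first,"i2")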

#Meta-regression for failed first intubation attempts

#Controlling for population characteristics (general, obese, neck immobilization, pregnant women; elderly as reference)
meta::metareg(mbin_first_random,population)
## 
## Mixed-Effects Model (k = 98; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7885 (SE = 0.1471)
## tau (square root of estimated tau^2 value):             0.8880
## I^2 (residual heterogeneity / unaccounted variability): 75.30%
## H^2 (unaccounted variability / sampling variability):   4.05
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 93) = 240.5046, p-val < .0001
## 
## Test of Moderators (coefficients 2:5):
## F(df1 = 4, df2 = 93) = 0.3788, p-val = 0.8232
## 
## Model Results:
## 
##                                estimate      se     tval    pval    ci.lb 
## intrcpt                         -0.3827  0.7894  -0.4848  0.6290  -1.9502 
## populationGeneral               -0.1010  0.7987  -0.1265  0.8996  -1.6871 
## populationNeck Immobilization   -0.4659  0.8406  -0.5543  0.5807  -2.1352 
## populationObese                 -0.2832  0.8765  -0.3231  0.7474  -2.0238 
## populationPregnant Women        -0.0313  1.2937  -0.0242  0.9808  -2.6004 
##                                 ci.ub 
## intrcpt                        1.1849    
## populationGeneral              1.4850    
## populationNeck Immobilization  1.2034    
## populationObese                1.4574    
## populationPregnant Women       2.5379    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for operator experience
meta::metareg(mbin_first_random,experience)
## 
## Mixed-Effects Model (k = 98; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7846 (SE = 0.1453)
## tau (square root of estimated tau^2 value):             0.8858
## I^2 (residual heterogeneity / unaccounted variability): 76.39%
## H^2 (unaccounted variability / sampling variability):   4.24
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 95) = 252.8384, p-val < .0001
## 
## Test of Moderators (coefficients 2:3):
## F(df1 = 2, df2 = 95) = 0.0313, p-val = 0.9692
## 
## Model Results:
## 
##                            estimate      se     tval    pval    ci.lb   ci.ub 
## intrcpt                     -0.6197  0.3609  -1.7171  0.0892  -1.3362  0.0968 
## experienceExperienced        0.0732  0.3855   0.1899  0.8498  -0.6920  0.8384 
## experienceNon-experienced    0.1018  0.4090   0.2488  0.8040  -0.7102  0.9137 
##  
## intrcpt                    . 
## experienceExperienced 
## experienceNon-experienced 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for intubation technique applied (regular, rapid sequence induction)
meta::metareg(mbin_first_random,technique)
## 
## Mixed-Effects Model (k = 98; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7772 (SE = 0.1456)
## tau (square root of estimated tau^2 value):             0.8816
## I^2 (residual heterogeneity / unaccounted variability): 75.72%
## H^2 (unaccounted variability / sampling variability):   4.12
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 94) = 239.6611, p-val < .0001
## 
## Test of Moderators (coefficients 2:4):
## F(df1 = 3, df2 = 94) = 0.7331, p-val = 0.5348
## 
## Model Results:
## 
##                                            estimate      se     tval    pval 
## intrcpt                                     -0.4843  0.2382  -2.0328  0.0449 
## techniqueRapid Sequence Induction           -0.6807  0.4957  -1.3732  0.1730 
## techniqueRegular                            -0.0284  0.2680  -0.1060  0.9158 
## techniqueRegular/Rapid Sequence Induction    0.1013  0.9476   0.1069  0.9151 
##                                              ci.lb    ci.ub 
## intrcpt                                    -0.9573  -0.0113  * 
## techniqueRapid Sequence Induction          -1.6650   0.3036    
## techniqueRegular                           -0.5605   0.5037    
## techniqueRegular/Rapid Sequence Induction  -1.7802   1.9828    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for setting (operating room, ICU, out of hospital, etc.)
meta::metareg(mbin_first_random,set)
## 
## Mixed-Effects Model (k = 98; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7888 (SE = 0.1479)
## tau (square root of estimated tau^2 value):             0.8881
## I^2 (residual heterogeneity / unaccounted variability): 74.03%
## H^2 (unaccounted variability / sampling variability):   3.85
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 93) = 219.7621, p-val < .0001
## 
## Test of Moderators (coefficients 2:5):
## F(df1 = 4, df2 = 93) = 0.4290, p-val = 0.7873
## 
## Model Results:
## 
##                     estimate      se     tval    pval    ci.lb   ci.ub 
## intrcpt              -0.3377  0.5577  -0.6054  0.5464  -1.4452  0.7699    
## setICU               -0.1962  0.6944  -0.2825  0.7782  -1.5751  1.1828    
## setMultiple           0.2576  0.9859   0.2613  0.7944  -1.7002  2.2155    
## setOperating Room    -0.2550  0.5697  -0.4477  0.6554  -1.3863  0.8763    
## setOut of Hospital    0.2974  0.7393   0.4022  0.6884  -1.1708  1.7655    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for nature of procedure (elective, urgent)
meta::metareg(mbin_first_random,nature)
## 
## Mixed-Effects Model (k = 98; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7579 (SE = 0.1431)
## tau (square root of estimated tau^2 value):             0.8706
## I^2 (residual heterogeneity / unaccounted variability): 75.45%
## H^2 (unaccounted variability / sampling variability):   4.07
## R^2 (amount of heterogeneity accounted for):            1.49%
## 
## Test for Residual Heterogeneity:
## QE(df = 95) = 199.2822, p-val < .0001
## 
## Test of Moderators (coefficients 2:3):
## F(df1 = 2, df2 = 95) = 2.1749, p-val = 0.1192
## 
## Model Results:
## 
##                 estimate      se     tval    pval    ci.lb    ci.ub 
## intrcpt          -1.2963  0.4421  -2.9320  0.0042  -2.1740  -0.4186  ** 
## natureElective    0.7455  0.4570   1.6312  0.1062  -0.1618   1.6529     
## natureUrgent      1.0725  0.5159   2.0790  0.0403   0.0484   2.0966   * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for inducer used
meta::metareg(mbin_first_random,inducer)
## 
## Mixed-Effects Model (k = 98; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7062 (SE = 0.1425)
## tau (square root of estimated tau^2 value):             0.8404
## I^2 (residual heterogeneity / unaccounted variability): 74.84%
## H^2 (unaccounted variability / sampling variability):   3.97
## R^2 (amount of heterogeneity accounted for):            8.20%
## 
## Test for Residual Heterogeneity:
## QE(df = 89) = 211.2711, p-val < .0001
## 
## Test of Moderators (coefficients 2:9):
## F(df1 = 8, df2 = 89) = 2.0293, p-val = 0.0518
## 
## Model Results:
## 
##                                estimate      se     tval    pval    ci.lb 
## intrcpt                         -0.4164  0.2056  -2.0251  0.0459  -0.8250 
## inducerEtomidate                -0.5169  0.8396  -0.6157  0.5397  -2.1852 
## inducerMidazolam                 0.4803  0.7148   0.6719  0.5034  -0.9400 
## inducerPropofol                 -0.2239  0.2401  -0.9326  0.3535  -0.7009 
## inducerPropofol or Thiopental    1.5150  1.5796   0.9591  0.3401  -1.6236 
## inducerPropofol/Ketamine        -1.0652  0.8683  -1.2267  0.2232  -2.7905 
## inducerPropofol/Midazolam        1.6044  0.6088   2.6354  0.0099   0.3948 
## inducerPropofol/Thiopental       1.5015  1.5886   0.9452  0.3471  -1.6550 
## inducerThiopental               -0.9277  0.6488  -1.4298  0.1563  -2.2168 
##                                  ci.ub 
## intrcpt                        -0.0078   * 
## inducerEtomidate                1.1514     
## inducerMidazolam                1.9006     
## inducerPropofol                 0.2531     
## inducerPropofol or Thiopental   4.6537     
## inducerPropofol/Ketamine        0.6602     
## inducerPropofol/Midazolam       2.8141  ** 
## inducerPropofol/Thiopental      4.6581     
## inducerThiopental               0.3615     
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for opioid used
meta::metareg(mbin_first_random,opioid)
## Warning in rma.uni(yi = TE[!exclude], sei = seTE[!exclude], data = dataset, :
## Redundant predictors dropped from the model.
## 
## Mixed-Effects Model (k = 98; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7495 (SE = 0.1434)
## tau (square root of estimated tau^2 value):             0.8657
## I^2 (residual heterogeneity / unaccounted variability): 75.54%
## H^2 (unaccounted variability / sampling variability):   4.09
## R^2 (amount of heterogeneity accounted for):            2.58%
## 
## Test for Residual Heterogeneity:
## QE(df = 93) = 230.1270, p-val < .0001
## 
## Test of Moderators (coefficients 2:5):
## F(df1 = 4, df2 = 93) = 1.4322, p-val = 0.2295
## 
## Model Results:
## 
##                             estimate      se     tval    pval    ci.lb    ci.ub 
## intrcpt                      -0.4532  0.2060  -2.2000  0.0303  -0.8623  -0.0441 
## opioidFentanyl               -0.0519  0.2464  -0.2108  0.8335  -0.5412   0.4373 
## opioidFentanyl/Sulfentanil   -3.1577  1.4690  -2.1496  0.0342  -6.0749  -0.2406 
## opioidRemifentanil           -0.1469  0.3708  -0.3962  0.6928  -0.8833   0.5894 
## opioidSulfentanil            -0.5813  0.5382  -1.0801  0.2829  -1.6500   0.4874 
##  
## intrcpt                     * 
## opioidFentanyl 
## opioidFentanyl/Sulfentanil  * 
## opioidRemifentanil 
## opioidSulfentanil 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for neuromuscular blocking agent used
meta::metareg(mbin_first_random,blocker)
## 
## Mixed-Effects Model (k = 98; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7516 (SE = 0.1456)
## tau (square root of estimated tau^2 value):             0.8670
## I^2 (residual heterogeneity / unaccounted variability): 75.40%
## H^2 (unaccounted variability / sampling variability):   4.06
## R^2 (amount of heterogeneity accounted for):            2.30%
## 
## Test for Residual Heterogeneity:
## QE(df = 90) = 234.0977, p-val < .0001
## 
## Test of Moderators (coefficients 2:8):
## F(df1 = 7, df2 = 90) = 1.0608, p-val = 0.3953
## 
## Model Results:
## 
##                                    estimate      se     tval    pval    ci.lb 
## intrcpt                             -0.3660  0.1998  -1.8317  0.0703  -0.7629 
## blockerAtracurium                   -0.0260  0.3772  -0.0690  0.9451  -0.7755 
## blockerCisatracurium                -0.0651  0.3907  -0.1667  0.8680  -0.8413 
## blockerRocuronium                   -0.3610  0.2719  -1.3279  0.1876  -0.9011 
## blockerRocuronium/Vecuronium         0.3386  1.4604   0.2318  0.8172  -2.5627 
## blockerSuccinylcholine              -0.7151  0.3733  -1.9158  0.0586  -1.4566 
## blockerSuccinylcholine/Rocuronium    1.4511  1.6345   0.8878  0.3770  -1.7962 
## blockerVecuronium                    0.2195  0.4053   0.5416  0.5894  -0.5856 
##                                     ci.ub 
## intrcpt                            0.0310  . 
## blockerAtracurium                  0.7234    
## blockerCisatracurium               0.7111    
## blockerRocuronium                  0.1791    
## blockerRocuronium/Vecuronium       3.2399    
## blockerSuccinylcholine             0.0265  . 
## blockerSuccinylcholine/Rocuronium  4.6983    
## blockerVecuronium                  1.0246    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
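#The univariable meta-regressions above all follow the same pattern; as a
#compact alternative (a sketch, assuming the moderator column names used
#above), they could be run in a single loop
moderators <- c("population", "experience", "technique", "set",
                "nature", "inducer", "opioid", "blocker")
lapply(moderators, function(m)
  meta::metareg(mbin_first_random, as.formula(paste("~", m))))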
#Multiple Meta-regression for failed first intubation attempts
str(first)
## 'data.frame':    110 obs. of  18 variables:
##  $ id          : int  1 2 3 4 6 7 8 11 14 15 ...
##  $ author      : Factor w/ 100 levels "Abdallah","Abdelgalel",..: 1 1 2 3 4 5 5 6 7 8 ...
##  $ year        : int  2011 2019 2018 2015 2020 2016 2015 2015 2016 2012 ...
##  $ population  : Factor w/ 5 levels "Elderly","General",..: 4 2 2 2 3 3 2 3 2 2 ...
##  $ predicted   : Factor w/ 3 levels "","Difficult",..: 2 3 2 3 2 2 3 2 3 2 ...
##  $ set         : Factor w/ 5 levels "Emergency department",..: 4 4 2 4 4 4 4 4 4 4 ...
##  $ nature      : Factor w/ 3 levels "","Elective",..: 2 2 3 2 2 2 2 2 2 1 ...
##  $ technique   : Factor w/ 4 levels "","Rapid Sequence Induction",..: 1 3 2 3 3 1 3 3 3 1 ...
##  $ experience  : Factor w/ 3 levels "","Experienced",..: 3 2 2 2 2 2 2 2 3 1 ...
##  $ intervention: Factor w/ 1 level "Videolaryngoscope": 1 1 1 1 1 1 1 1 1 1 ...
##  $ reference   : Factor w/ 1 level "Macintosh": 1 1 1 1 1 1 1 1 1 1 ...
##  $ inducer     : Factor w/ 9 levels "","Etomidate",..: 1 4 6 4 4 1 4 4 4 1 ...
##  $ opioid      : Factor w/ 6 levels "","Fentanyl",..: 1 2 2 2 2 1 1 2 5 1 ...
##  $ blocker     : Factor w/ 8 levels "","Atracurium",..: 1 3 4 4 8 1 4 4 4 1 ...
##  $ ffirst.e1   : int  7 1 5 0 0 0 0 1 25 3 ...
##  $ ffirst.t1   : int  50 35 80 40 40 78 25 45 64 25 ...
##  $ ffirst.e2   : int  4 2 11 1 1 0 0 6 6 9 ...
##  $ ffirst.t2   : int  49 35 40 40 40 96 25 45 22 25 ...
model_first<-metafor::rma.uni(ai=ffirst.e1,n1i = ffirst.t1,ci=ffirst.e2,n2i = ffirst.t2,data = first,method = "ML", measure = "RR", mods = ~experience+technique+population+set+nature+inducer+opioid+blocker,test = "knha")
## Warning in metafor::rma.uni(ai = ffirst.e1, n1i = ffirst.t1, ci = ffirst.e2, :
## Redundant predictors dropped from the model.
model_first
## 
## Mixed-Effects Model (k = 110; tau^2 estimator: ML)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.0364 (SE = 0.0308)
## tau (square root of estimated tau^2 value):             0.1907
## I^2 (residual heterogeneity / unaccounted variability): 7.92%
## H^2 (unaccounted variability / sampling variability):   1.09
## R^2 (amount of heterogeneity accounted for):            91.02%
## 
## Test for Residual Heterogeneity:
## QE(df = 75) = 119.5532, p-val = 0.0008
## 
## Test of Moderators (coefficients 2:35):
## F(df1 = 34, df2 = 75) = 1.9865, p-val = 0.0070
## 
## Model Results:
## 
##                                            estimate      se     tval    pval 
## intrcpt                                     -2.2266  1.3019  -1.7102  0.0914 
## experienceExperienced                       -0.0287  0.4065  -0.0706  0.9439 
## experienceNon-experienced                   -0.0459  0.3950  -0.1163  0.9077 
## techniqueRapid Sequence Induction           -0.8309  0.5032  -1.6514  0.1028 
## techniqueRegular                             1.2929  0.6488   1.9929  0.0499 
## techniqueRegular/Rapid Sequence Induction    0.6291  1.0075   0.6244  0.5343 
## populationGeneral                           -0.1420  0.7775  -0.1826  0.8556 
## populationNeck Immobilization               -0.3989  0.7819  -0.5101  0.6115 
## populationObese                              0.5258  0.9277   0.5668  0.5725 
## populationPregnant Women                     1.1081  1.3569   0.8166  0.4167 
## setICU                                      -0.0017  0.5186  -0.0032  0.9975 
## setMultiple                                 -0.1468  0.5952  -0.2466  0.8059 
## setOperating Room                            0.7786  0.8975   0.8676  0.3884 
## setOut of Hospital                           0.2274  0.5177   0.4393  0.6617 
## natureElective                               1.0591  0.6122   1.7299  0.0878 
## natureUrgent                                 2.4813  0.8522   2.9115  0.0047 
## inducerEtomidate                            -2.1720  1.2643  -1.7179  0.0899 
## inducerMidazolam                            -0.6788  1.1933  -0.5689  0.5711 
## inducerPropofol                             -1.7122  0.9522  -1.7982  0.0762 
## inducerPropofol or Thiopental               -0.0653  2.1681  -0.0301  0.9760 
## inducerPropofol/Ketamine                    -1.3249  1.1592  -1.1430  0.2567 
## inducerPropofol/Midazolam                   -0.0404  0.9844  -0.0410  0.9674 
## inducerPropofol/Thiopental                   1.4908  2.0440   0.7294  0.4680 
## inducerThiopental                           -2.0144  1.1529  -1.7472  0.0847 
## opioidFentanyl                               0.3413  0.4743   0.7195  0.4740 
## opioidFentanyl/Sulfentanil                  -2.6544  1.8297  -1.4508  0.1510 
## opioidMorphine                               0.8896  2.4605   0.3615  0.7187 
## opioidRemifentanil                           0.4856  0.5557   0.8737  0.3851 
## opioidSulfentanil                            0.2547  0.7567   0.3366  0.7374 
## blockerAtracurium                            0.3201  0.6510   0.4917  0.6243 
## blockerCisatracurium                         0.0756  0.6588   0.1148  0.9089 
## blockerRocuronium                            0.2506  0.6047   0.4145  0.6797 
## blockerRocuronium/Vecuronium                 0.6274  1.7918   0.3502  0.7272 
## blockerSuccinylcholine                      -0.2927  0.7235  -0.4046  0.6869 
## blockerVecuronium                            0.0893  0.6203   0.1440  0.8859 
##                                              ci.lb   ci.ub 
## intrcpt                                    -4.8202  0.3670   . 
## experienceExperienced                      -0.8385  0.7811     
## experienceNon-experienced                  -0.8329  0.7410     
## techniqueRapid Sequence Induction          -1.8333  0.1714     
## techniqueRegular                            0.0005  2.5853   * 
## techniqueRegular/Rapid Sequence Induction  -1.3780  2.6361     
## populationGeneral                          -1.6909  1.4070     
## populationNeck Immobilization              -1.9566  1.1588     
## populationObese                            -1.3222  2.3738     
## populationPregnant Women                   -1.5950  3.8111     
## setICU                                     -1.0348  1.0315     
## setMultiple                                -1.3325  1.0390     
## setOperating Room                          -1.0092  2.5665     
## setOut of Hospital                         -0.8038  1.2587     
## natureElective                             -0.1605  2.2787   . 
## natureUrgent                                0.7836  4.1790  ** 
## inducerEtomidate                           -4.6906  0.3467   . 
## inducerMidazolam                           -3.0560  1.6983     
## inducerPropofol                            -3.6091  0.1847   . 
## inducerPropofol or Thiopental              -4.3844  4.2537     
## inducerPropofol/Ketamine                   -3.6341  0.9842     
## inducerPropofol/Midazolam                  -2.0015  1.9208     
## inducerPropofol/Thiopental                 -2.5810  5.5626     
## inducerThiopental                          -4.3110  0.2823   . 
## opioidFentanyl                             -0.6036  1.2862     
## opioidFentanyl/Sulfentanil                 -6.2993  0.9905     
## opioidMorphine                             -4.0120  5.7912     
## opioidRemifentanil                         -0.6215  1.5926     
## opioidSulfentanil                          -1.2527  1.7621     
## blockerAtracurium                          -0.9767  1.6169     
## blockerCisatracurium                       -1.2367  1.3880     
## blockerRocuronium                          -0.9539  1.4552     
## blockerRocuronium/Vecuronium               -2.9421  4.1970     
## blockerSuccinylcholine                     -1.7339  1.1485     
## blockerVecuronium                          -1.1463  1.3250     
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
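#The omnibus F-test above rests on asymptotic assumptions, so it is re-checked
#with an approximate permutation test; fixing the RNG seed beforehand would
#make the 1000 permutations reproducible (a suggestion, not in the original)
set.seed(123)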
metafor::permutest(model_first)
## Running 1000 iterations for approximate permutation test.
## 
## Test of Moderators (coefficients 2:35):
## F(df1 = 34, df2 = 75) = 1.9865, p-val* = 0.2280
## 
## Model Results:
## 
##                                            estimate      se     tval   pval* 
## intrcpt                                     -2.2266  1.3019  -1.7102  0.1410 
## experienceExperienced                       -0.0287  0.4065  -0.0706  0.9570 
## experienceNon-experienced                   -0.0459  0.3950  -0.1163  0.9200 
## techniqueRapid Sequence Induction           -0.8309  0.5032  -1.6514  0.1370 
## techniqueRegular                             1.2929  0.6488   1.9929  0.0850 
## techniqueRegular/Rapid Sequence Induction    0.6291  1.0075   0.6244  0.5080 
## populationGeneral                           -0.1420  0.7775  -0.1826  0.8700 
## populationNeck Immobilization               -0.3989  0.7819  -0.5101  0.6730 
## populationObese                              0.5258  0.9277   0.5668  0.6530 
## populationPregnant Women                     1.1081  1.3569   0.8166  0.4480 
## setICU                                      -0.0017  0.5186  -0.0032  0.9970 
## setMultiple                                 -0.1468  0.5952  -0.2466  0.8000 
## setOperating Room                            0.7786  0.8975   0.8676  0.4010 
## setOut of Hospital                           0.2274  0.5177   0.4393  0.6580 
## natureElective                               1.0591  0.6122   1.7299  0.1450 
## natureUrgent                                 2.4813  0.8522   2.9115  0.0100 
## inducerEtomidate                            -2.1720  1.2643  -1.7179  0.0950 
## inducerMidazolam                            -0.6788  1.1933  -0.5689  0.5750 
## inducerPropofol                             -1.7122  0.9522  -1.7982  0.0970 
## inducerPropofol or Thiopental               -0.0653  2.1681  -0.0301  0.9830 
## inducerPropofol/Ketamine                    -1.3249  1.1592  -1.1430  0.2730 
## inducerPropofol/Midazolam                   -0.0404  0.9844  -0.0410  0.9650 
## inducerPropofol/Thiopental                   1.4908  2.0440   0.7294  0.4790 
## inducerThiopental                           -2.0144  1.1529  -1.7472  0.1080 
## opioidFentanyl                               0.3413  0.4743   0.7195  0.5400 
## opioidFentanyl/Sulfentanil                  -2.6544  1.8297  -1.4508  0.1600 
## opioidMorphine                               0.8896  2.4605   0.3615  0.7340 
## opioidRemifentanil                           0.4856  0.5557   0.8737  0.4670 
## opioidSulfentanil                            0.2547  0.7567   0.3366  0.7690 
## blockerAtracurium                            0.3201  0.6510   0.4917  0.6860 
## blockerCisatracurium                         0.0756  0.6588   0.1148  0.9220 
## blockerRocuronium                            0.2506  0.6047   0.4145  0.7380 
## blockerRocuronium/Vecuronium                 0.6274  1.7918   0.3502  0.7330 
## blockerSuccinylcholine                      -0.2927  0.7235  -0.4046  0.7320 
## blockerVecuronium                            0.0893  0.6203   0.1440  0.8980 
##                                              ci.lb   ci.ub 
## intrcpt                                    -4.8202  0.3670     
## experienceExperienced                      -0.8385  0.7811     
## experienceNon-experienced                  -0.8329  0.7410     
## techniqueRapid Sequence Induction          -1.8333  0.1714     
## techniqueRegular                            0.0005  2.5853   . 
## techniqueRegular/Rapid Sequence Induction  -1.3780  2.6361     
## populationGeneral                          -1.6909  1.4070     
## populationNeck Immobilization              -1.9566  1.1588     
## populationObese                            -1.3222  2.3738     
## populationPregnant Women                   -1.5950  3.8111     
## setICU                                     -1.0348  1.0315     
## setMultiple                                -1.3325  1.0390     
## setOperating Room                          -1.0092  2.5665     
## setOut of Hospital                         -0.8038  1.2587     
## natureElective                             -0.1605  2.2787     
## natureUrgent                                0.7836  4.1790  ** 
## inducerEtomidate                           -4.6906  0.3467   . 
## inducerMidazolam                           -3.0560  1.6983     
## inducerPropofol                            -3.6091  0.1847   . 
## inducerPropofol or Thiopental              -4.3844  4.2537     
## inducerPropofol/Ketamine                   -3.6341  0.9842     
## inducerPropofol/Midazolam                  -2.0015  1.9208     
## inducerPropofol/Thiopental                 -2.5810  5.5626     
## inducerThiopental                          -4.3110  0.2823     
## opioidFentanyl                             -0.6036  1.2862     
## opioidFentanyl/Sulfentanil                 -6.2993  0.9905     
## opioidMorphine                             -4.0120  5.7912     
## opioidRemifentanil                         -0.6215  1.5926     
## opioidSulfentanil                          -1.2527  1.7621     
## blockerAtracurium                          -0.9767  1.6169     
## blockerCisatracurium                       -1.2367  1.3880     
## blockerRocuronium                          -0.9539  1.4552     
## blockerRocuronium/Vecuronium               -2.9421  4.1970     
## blockerSuccinylcholine                     -1.7339  1.1485     
## blockerVecuronium                          -1.1463  1.3250     
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
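#Reduced model keeping only the moderators that showed some signal above
#(technique, nature, inducer)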
model_first.2<-metafor::rma.uni(ai=ffirst.e1,n1i = ffirst.t1,ci=ffirst.e2,n2i = ffirst.t2,data = first,method = "ML", measure = "RR", mods = ~technique+nature+inducer,test = "knha")
model_first.2
## 
## Mixed-Effects Model (k = 110; tau^2 estimator: ML)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.1090 (SE = 0.0527)
## tau (square root of estimated tau^2 value):             0.3302
## I^2 (residual heterogeneity / unaccounted variability): 27.48%
## H^2 (unaccounted variability / sampling variability):   1.38
## R^2 (amount of heterogeneity accounted for):            73.09%
## 
## Test for Residual Heterogeneity:
## QE(df = 96) = 142.1464, p-val = 0.0016
## 
## Test of Moderators (coefficients 2:14):
## F(df1 = 13, df2 = 96) = 3.9002, p-val < .0001
## 
## Model Results:
## 
##                                            estimate      se     tval    pval 
## intrcpt                                     -1.4754  0.3067  -4.8103  <.0001 
## techniqueRapid Sequence Induction           -0.8589  0.3807  -2.2564  0.0263 
## techniqueRegular                             1.3342  0.5286   2.5240  0.0132 
## techniqueRegular/Rapid Sequence Induction    1.4460  0.8618   1.6778  0.0966 
## natureElective                               0.7236  0.3994   1.8119  0.0731 
## natureUrgent                                 1.5924  0.3524   4.5186  <.0001 
## inducerEtomidate                            -1.5157  0.7623  -1.9883  0.0496 
## inducerMidazolam                            -0.1403  0.7972  -0.1760  0.8606 
## inducerPropofol                             -1.0772  0.5092  -2.1154  0.0370 
## inducerPropofol or Thiopental                0.5162  1.7954   0.2875  0.7743 
## inducerPropofol/Ketamine                    -0.7396  0.7181  -1.0299  0.3056 
## inducerPropofol/Midazolam                    0.5222  0.6889   0.7581  0.4503 
## inducerPropofol/Thiopental                   1.8272  1.7683   1.0333  0.3041 
## inducerThiopental                           -1.6232  0.7204  -2.2533  0.0265 
##                                              ci.lb    ci.ub 
## intrcpt                                    -2.0843  -0.8666  *** 
## techniqueRapid Sequence Induction          -1.6146  -0.1033    * 
## techniqueRegular                            0.2849   2.3834    * 
## techniqueRegular/Rapid Sequence Induction  -0.2647   3.1567    . 
## natureElective                             -0.0691   1.5164    . 
## natureUrgent                                0.8928   2.2919  *** 
## inducerEtomidate                           -3.0289  -0.0025    * 
## inducerMidazolam                           -1.7227   1.4421      
## inducerPropofol                            -2.0880  -0.0664    * 
## inducerPropofol or Thiopental              -3.0477   4.0801      
## inducerPropofol/Ketamine                   -2.1650   0.6859      
## inducerPropofol/Midazolam                  -0.8453   1.8897      
## inducerPropofol/Thiopental                 -1.6829   5.3373      
## inducerThiopental                          -3.0531  -0.1933    * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
model_first.3<-metafor::rma.uni(ai=ffirst.e1,n1i = ffirst.t1,ci=ffirst.e2,n2i = ffirst.t2,data = first,method = "ML", measure = "RR", mods = ~nature,test = "knha")
model_first.3
## 
## Mixed-Effects Model (k = 110; tau^2 estimator: ML)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.3055 (SE = 0.0966)
## tau (square root of estimated tau^2 value):             0.5527
## I^2 (residual heterogeneity / unaccounted variability): 52.59%
## H^2 (unaccounted variability / sampling variability):   2.11
## R^2 (amount of heterogeneity accounted for):            24.58%
## 
## Test for Residual Heterogeneity:
## QE(df = 107) = 200.2237, p-val < .0001
## 
## Test of Moderators (coefficients 2:3):
## F(df1 = 2, df2 = 107) = 3.7581, p-val = 0.0265
## 
## Model Results:
## 
##                 estimate      se     tval    pval    ci.lb    ci.ub 
## intrcpt          -1.3623  0.3736  -3.6469  0.0004  -2.1028  -0.6218  *** 
## natureElective    0.8814  0.3874   2.2754  0.0249   0.1135   1.6494    * 
## natureUrgent      1.1741  0.4283   2.7410  0.0072   0.3249   2.0232   ** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
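#Model comparison: both moderator models above are fitted with ML and are
#nested, so a likelihood-ratio test is possible; a minimal sketch, assuming
#the model objects fitted above
anova(model_first.2,model_first.3)
metafor::fitstats(model_first.2,model_first.3)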
#Subgroup analysis for failed first intubation attempt restricted to regular induction technique, elective procedures, and propofol as the inducer
sub_first_analysis<-first_analysis[first_analysis$technique=="Regular" & first_analysis$nature=="Elective" & first_analysis$inducer=="Propofol",]
mbin_sub_first_random<-meta::metabin(ffirst.e1,ffirst.t1,ffirst.e2,ffirst.t2,data = sub_first_analysis,studlab = paste(author, year, id),comb.fixed = FALSE,comb.random = TRUE,method.tau = "SJ",hakn = TRUE,prediction = TRUE,incr = 0.5,sm = "RR")
mbin_sub_first_random
##                               RR            95%-CI %W(random)
## Abdallah 2019 2           0.5000 [0.0475;  5.2653]        1.0
## Abdelgawad 2015 4         0.3333 [0.0140;  7.9424]        0.7
## Agrawal 2020 6            0.3333 [0.0140;  7.9424]        0.7
## Akbar 2015 11             0.1667 [0.0209;  1.3291]        1.2
## Al - Ghamdi 2016 14       1.4323 [0.6780;  3.0256]        2.6
## Ali 2017 17               0.3333 [0.0367;  3.0260]        1.1
## Altun 2018 18             0.8333 [0.3261;  2.1296]        2.4
## Ander 2017 21             0.0909 [0.0052;  1.5894]        0.8
## Aoi 2010 23               1.0000 [0.2946;  3.3948]        2.0
## Aqil 2016 24              0.8571 [0.3158;  2.3264]        2.3
## Aqil 2017 25              0.4000 [0.1648;  0.9710]        2.4
## Arslan 2017 29            3.5217 [0.1864; 66.5522]        0.7
## Bakshi 2019 33            0.9730 [0.0632; 14.9718]        0.8
## Barak 2007 34             0.4500 [0.1469;  1.3788]        2.1
## Bashir 2020 36            0.2857 [0.0632;  1.2922]        1.7
## Colak 2019 38             0.2500 [0.0291;  2.1505]        1.2
## El - Tahan 2018 39        2.0594 [0.7770;  5.4582]        2.3
## Huang 2020 40             2.7119 [1.2771;  5.7587]        2.6
## Wasem 2013 42             0.5000 [0.0989;  2.5270]        1.6
## Bhalla 2018 44            2.0000 [0.4291;  9.3210]        1.7
## Bharti 2014 46            0.3167 [0.0360;  2.7855]        1.1
## Cakir 2020 53             0.3333 [0.0141;  7.8748]        0.7
## Carlino 2009 55           0.0667 [0.0042;  1.0690]        0.8
## Di Marco 2011 67          0.6364 [0.2668;  1.5179]        2.5
## Erden 2010 70             3.0000 [0.1315; 68.4178]        0.7
## Gunes 2020 76             0.2857 [0.0610;  1.3382]        1.7
## Gupta 2020 77             0.5000 [0.0472;  5.2961]        1.0
## Hosalli 2017 81           0.4286 [0.1223;  1.5022]        2.0
## Hsu 2012 82               0.1111 [0.0062;  1.9760]        0.8
## Hu 2017 83                0.1920 [0.0093;  3.9489]        0.7
## Kaur 2020 87              0.1000 [0.0121;  0.8275]        1.2
## Kim 2013 91               0.1160 [0.0066;  2.0345]        0.8
## Kleine-Brueggeney 2017 93 0.4831 [0.3851;  0.6060]        3.1
## Koh 2010 94               0.0667 [0.0095;  0.4671]        1.3
## Kunaz 2016 99             1.3333 [0.3144;  5.6542]        1.8
## Laosuwan 2015 101         2.0000 [0.2107; 18.9807]        1.1
## Lim 2005 105              0.5000 [0.0989;  2.5270]        1.6
## Lin 2012 106              0.4000 [0.1865;  0.8577]        2.6
## Liu 2014 108              0.1000 [0.0134;  0.7451]        1.3
## Maharaj 2006 113          0.3333 [0.0141;  7.8648]        0.7
## Maharaj 2007 114          0.3333 [0.0144;  7.7130]        0.7
## Maharaj 2008 115          0.1429 [0.0193;  1.0572]        1.3
## Malik 2008 117            0.9167 [0.3153;  2.6649]        2.2
## Malik1 2009 118           0.6250 [0.2819;  1.3856]        2.5
## Malik2 2009 119           2.0000 [0.1914; 20.8980]        1.0
## Mcelwain 2011 123         0.3563 [0.1087;  1.1685]        2.1
## Myunghun-Kim 2017 125     3.0000 [0.1297; 69.4167]        0.7
## Ing 2017 128              2.7273 [0.2815; 26.4214]        1.1
## Peirovifar 2014 137       0.5000 [0.1789;  1.3975]        2.3
## Reena 2019 141            0.3077 [0.1077;  0.8792]        2.2
## Risse 2020 142            1.5196 [0.3955;  5.8387]        1.9
## Sargin 2016 145           0.0667 [0.0039;  1.1365]        0.8
## Shah 2016 147             0.2857 [0.1062;  0.7684]        2.3
## Takenaka 2011 153         0.0883 [0.0051;  1.5379]        0.8
## Taylor 2013 154           0.0270 [0.0017;  0.4348]        0.8
## Teoh 2010 156             3.5000 [0.8354; 14.6640]        1.8
## Tsan 2020 159             0.3333 [0.0355;  3.1262]        1.1
## Yoo 2018 167              0.2000 [0.0254;  1.5756]        1.2
## Yumul 2016 168            0.5597 [0.2565;  1.2215]        2.6
## El-Tahan 2017 170         4.6774 [0.2446; 89.4307]        0.7
## Lee 2012 172              3.0000 [1.1854;  7.5925]        2.4
## Zhao 2014 177             0.4223 [0.2800;  0.6370]        3.0
## Ferrando 2011 180         0.1667 [0.0213;  1.3020]        1.2
## Serocki 2013 182          0.7111 [0.2449;  2.0646]        2.2
## Serocki 2010 183          0.7000 [0.2370;  2.0676]        2.2
## 
## Number of studies combined: k = 65
## 
##                          RR           95%-CI     t  p-value
## Random effects model 0.5492 [0.4267; 0.7069] -4.74 < 0.0001
## Prediction interval         [0.1018; 2.9618]               
## 
## Quantifying heterogeneity:
##  tau^2 = 0.6951 [0.0831; 0.7964]; tau = 0.8337 [0.2883; 0.8924];
##  I^2 = 42.2% [22.2%; 57.1%]; H = 1.32 [1.13; 1.53]
## 
## Test of heterogeneity:
##       Q d.f. p-value
##  110.70   64  0.0003
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
length(sub_first_analysis$ffirst.e1)
## [1] 65
sum(sub_first_analysis$ffirst.t1,sub_first_analysis$ffirst.t2)
## [1] 6067
#Funnel Plot for failed first intubation attempt
meta::funnel(mbin_first_random,ref.triangle=TRUE,contour.levels=c(0.9,0.95,0.99),col.contour=c("darkblue","blue","lightblue"))
meta::metabias(mbin_first_random, method.bias = "linreg")
## 
##  Linear regression test of funnel plot asymmetry
## 
## data:  mbin_first_random
## t = -1.5982, df = 96, p-value = 0.1133
## alternative hypothesis: asymmetry in funnel plot
## sample estimates:
##       bias    se.bias  intercept 
## -0.4185188  0.2618609 -0.2533662
dmetar::eggers.test(mbin_first_random)
##              Intercept ConfidenceInterval      t       p
## Egger's test    -0.419       -1.007-0.169 -1.598 0.11327
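#Additional small-study-effects check: trim-and-fill; a minimal sketch,
#assuming the mbin_first_random object above
tf_first<-meta::trimfill(mbin_first_random)
tf_first
meta::funnel(tf_first)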

Failed Intubation Within 2 Attempts

fia.2<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Pairwise 2 attempts.csv")
length(fia.2$ftent.e1)
## [1] 93
#Number of comparisons with zero events in both arms
fia.2_zeros<-dplyr::filter(fia.2,fia.2$ftent.e1==0 & fia.2$ftent.e2==0)
length(fia.2_zeros$ftent.e1)
## [1] 48
#Table for Meta-analysis of failed intubation within 2 attempts
fia.2_analysis<-dplyr::filter(fia.2,fia.2$ftent.e1>0 | fia.2$ftent.e2>0)

#Number of comparisons and patients meta-analyzed for failed intubation within 2 attempts
length(fia.2_analysis$ftent.e1)
## [1] 45
sum(fia.2_analysis$ftent.t1,fia.2_analysis$ftent.t2)
## [1] 5369
#Meta-analysis for failed intubation within 2 attempts
mbin_fia.2_fixed<-meta::metabin(ftent.e1,ftent.t1,ftent.e2,ftent.t2,data = fia.2_analysis,studlab = paste(author, year),comb.fixed = TRUE,comb.random = FALSE,method.tau = "SJ",hakn = TRUE,prediction = TRUE,incr = 0.5,sm = "RR")
mbin_fia.2_fixed
##                      RR             95%-CI %W(fixed)
## Abdallah 2011    6.8614 [0.3638; 129.4185]       0.3
## Abdelgalel 2018  0.1667 [0.0179;   1.5517]       2.2
## Akbar 2015       0.2000 [0.0099;   4.0511]       1.4
## Ali 2012         0.3333 [0.0371;   2.9910]       1.6
## Ali 2017         0.3333 [0.0141;   7.8648]       0.8
## Altun 2018       0.3496 [0.0607;   2.0132]       2.2
## Andersen 2011    0.2000 [0.0098;   4.0624]       1.4
## Aoi 2010         1.0000 [0.0676;  14.7865]       0.5
## Arici 2014       5.0000 [0.2477; 100.9273]       0.3
## Barak 2007       0.5625 [0.0520;   6.0868]       1.0
## El - Tahan 2018  5.4433 [0.3229;  91.7505]       0.4
## Huang 2020       1.5254 [0.4456;   5.2215]       2.2
## Wasem 2013       1.0000 [0.0655;  15.2598]       0.5
## Bhalla 2018      7.0000 [0.3936; 124.4923]       0.3
## Bharti 2014      0.3171 [0.0137;   7.3255]       0.8
## Dey 2020         0.1528 [0.0468;   0.4992]      10.9
## Erden 2010       3.0000 [0.1315;  68.4178]       0.3
## Goksu 2016       0.6000 [0.2297;   1.5673]       5.5
## Griesdale 2012   1.5000 [0.2800;   8.0356]       1.1
## Hirabayashi 2009 0.0404 [0.0055;   0.2965]      13.4
## Hirabayashi 2010 0.1000 [0.0130;   0.7666]       5.5
## Hsu 2012         0.2000 [0.0100;   3.9955]       1.4
## Kido 2015        0.3333 [0.0142;   7.8035]       0.8
## Koh 2010         0.2500 [0.0300;   2.0828]       2.2
## Lascarrou 2017   0.9117 [0.4129;   2.0127]       6.6
## Lin 2012         0.1647 [0.0203;   1.3378]       3.3
## Liu 2019         0.0481 [0.0028;   0.8155]       5.7
## Macke 2020       0.1429 [0.0075;   2.7190]       1.9
## Maharaj 2006     0.3333 [0.0141;   7.8648]       0.8
## Maharaj 2007     0.3333 [0.0144;   7.7130]       0.8
## Maharaj 2008     0.1111 [0.0064;   1.9341]       2.5
## Malik 2008       0.3333 [0.0491;   2.2644]       1.6
## Malik1 2009      0.0561 [0.0031;   1.0022]       3.3
## Mcelwain 2011    0.2672 [0.0252;   2.8318]       1.4
## Nishiyama 2011   1.8160 [0.4229;   7.7993]       1.6
## Parasa 2016      2.9048 [0.1230;  68.5796]       0.3
## Peirovifar 2014  0.5000 [0.1789;   1.3975]       4.4
## Ranieri 2012     0.3139 [0.0130;   7.5664]       0.8
## Risse 2020       2.7353 [0.3000;  24.9395]       0.6
## Ruetzeler 2020   0.3818 [0.0769;   1.8970]       2.8
## Shah 2016        0.4833 [0.0463;   5.0456]       1.1
## Shukla 2017      0.1429 [0.0076;   2.6781]       1.9
## Teoh 2010        1.0033 [0.0412;  24.4334]       0.4
## Lee 2012         9.0000 [1.2883;  62.8753]       0.8
## Serocki 2013     2.5591 [0.1265;  51.7504]       0.4
## 
## Number of studies combined: k = 45
## 
##                         RR           95%-CI     z  p-value
## Fixed effect model  0.5230 [0.4103; 0.6668] -5.23 < 0.0001
## Prediction interval        [0.0663; 4.1835]               
## 
## Quantifying heterogeneity:
##  tau^2 = 1.0170 [0.0000; 1.4232]; tau = 1.0084 [0.0000; 1.1930];
##  I^2 = 27.0% [0.0%; 49.8%]; H = 1.17 [1.00; 1.41]
## 
## Test of heterogeneity:
##      Q d.f. p-value
##  60.30   44  0.0517
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Continuity correction of 0.5 in studies with zero cell frequencies
mbin_fia.2_random<-meta::metabin(ftent.e1,ftent.t1,ftent.e2,ftent.t2,data = fia.2_analysis,studlab = paste(author, year),comb.fixed = FALSE,comb.random = TRUE,method.tau = "SJ",hakn = TRUE,prediction = TRUE,incr = 0.5,sm = "RR")
mbin_fia.2_random
##                      RR             95%-CI %W(random)
## Abdallah 2011    6.8614 [0.3638; 129.4185]        1.6
## Abdelgalel 2018  0.1667 [0.0179;   1.5517]        2.3
## Akbar 2015       0.2000 [0.0099;   4.0511]        1.6
## Ali 2012         0.3333 [0.0371;   2.9910]        2.3
## Ali 2017         0.3333 [0.0141;   7.8648]        1.5
## Altun 2018       0.3496 [0.0607;   2.0132]        2.9
## Andersen 2011    0.2000 [0.0098;   4.0624]        1.6
## Aoi 2010         1.0000 [0.0676;  14.7865]        1.8
## Arici 2014       5.0000 [0.2477; 100.9273]        1.6
## Barak 2007       0.5625 [0.0520;   6.0868]        2.1
## El - Tahan 2018  5.4433 [0.3229;  91.7505]        1.7
## Huang 2020       1.5254 [0.4456;   5.2215]        3.8
## Wasem 2013       1.0000 [0.0655;  15.2598]        1.8
## Bhalla 2018      7.0000 [0.3936; 124.4923]        1.7
## Bharti 2014      0.3171 [0.0137;   7.3255]        1.5
## Dey 2020         0.1528 [0.0468;   0.4992]        3.9
## Erden 2010       3.0000 [0.1315;  68.4178]        1.5
## Goksu 2016       0.6000 [0.2297;   1.5673]        4.2
## Griesdale 2012   1.5000 [0.2800;   8.0356]        3.0
## Hirabayashi 2009 0.0404 [0.0055;   0.2965]        2.6
## Hirabayashi 2010 0.1000 [0.0130;   0.7666]        2.5
## Hsu 2012         0.2000 [0.0100;   3.9955]        1.6
## Kido 2015        0.3333 [0.0142;   7.8035]        1.5
## Koh 2010         0.2500 [0.0300;   2.0828]        2.4
## Lascarrou 2017   0.9117 [0.4129;   2.0127]        4.5
## Lin 2012         0.1647 [0.0203;   1.3378]        2.5
## Liu 2019         0.0481 [0.0028;   0.8155]        1.7
## Macke 2020       0.1429 [0.0075;   2.7190]        1.6
## Maharaj 2006     0.3333 [0.0141;   7.8648]        1.5
## Maharaj 2007     0.3333 [0.0144;   7.7130]        1.5
## Maharaj 2008     0.1111 [0.0064;   1.9341]        1.7
## Malik 2008       0.3333 [0.0491;   2.2644]        2.7
## Malik1 2009      0.0561 [0.0031;   1.0022]        1.7
## Mcelwain 2011    0.2672 [0.0252;   2.8318]        2.2
## Nishiyama 2011   1.8160 [0.4229;   7.7993]        3.4
## Parasa 2016      2.9048 [0.1230;  68.5796]        1.5
## Peirovifar 2014  0.5000 [0.1789;   1.3975]        4.1
## Ranieri 2012     0.3139 [0.0130;   7.5664]        1.5
## Risse 2020       2.7353 [0.3000;  24.9395]        2.3
## Ruetzeler 2020   0.3818 [0.0769;   1.8970]        3.2
## Shah 2016        0.4833 [0.0463;   5.0456]        2.2
## Shukla 2017      0.1429 [0.0076;   2.6781]        1.6
## Teoh 2010        1.0033 [0.0412;  24.4334]        1.5
## Lee 2012         9.0000 [1.2883;  62.8753]        2.7
## Serocki 2013     2.5591 [0.1265;  51.7504]        1.6
## 
## Number of studies combined: k = 45
## 
##                          RR           95%-CI     t p-value
## Random effects model 0.5267 [0.3540; 0.7838] -3.25  0.0022
## Prediction interval         [0.0663; 4.1835]              
## 
## Quantifying heterogeneity:
##  tau^2 = 1.0170 [0.0000; 1.4232]; tau = 1.0084 [0.0000; 1.1930];
##  I^2 = 27.0% [0.0%; 49.8%]; H = 1.17 [1.00; 1.41]
## 
## Test of heterogeneity:
##      Q d.f. p-value
##  60.30   44  0.0517
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
#Estimated probability of failed intubation within 2 attempts with Macintosh
meta::metaprop(event = ftent.e2,n = ftent.t2 ,studlab = paste(author,year),data = fia.2,method = "GLMM",sm = "PLOGIT",comb.fixed = FALSE,comb.random = TRUE,hakn = TRUE)
##                          proportion           95%-CI
## Abdallah 2011                0.0000 [0.0000; 0.0725]
## Abdallah 2019                0.0000 [0.0000; 0.1000]
## Abdelgalel 2018              0.0750 [0.0157; 0.2039]
## Abdelgawad 2015              0.0000 [0.0000; 0.1684]
## Agrawal 2020                 0.0000 [0.0000; 0.0881]
## Ahmad 2016                   0.0000 [0.0000; 0.0377]
## Ahmad 2015                   0.0000 [0.0000; 0.1372]
## Akbar 2015                   0.0444 [0.0054; 0.1515]
## Al-Ghamdi 2016               0.0000 [0.0000; 0.1544]
## Ali 2012                     0.1200 [0.0255; 0.3122]
## Ali 2017                     0.0333 [0.0008; 0.1722]
## Altun 2018                   0.0698 [0.0146; 0.1906]
## Andersen 2011                0.0400 [0.0049; 0.1371]
## Aoi 2010                     0.0556 [0.0014; 0.2729]
## Aqil 2016                    0.0000 [0.0000; 0.0881]
## Aqil 2017                    0.0000 [0.0000; 0.0513]
## Arici 2014                   0.0000 [0.0000; 0.0881]
## Arslan 2017                  0.0000 [0.0000; 0.0881]
## Bakshi 2019                  0.0000 [0.0000; 0.0974]
## Barak 2007                   0.0222 [0.0027; 0.0780]
## Barman 2017                  0.0000 [0.0000; 0.1000]
## Bashir 2020                  0.0000 [0.0000; 0.0881]
## Colak 2019                   0.0000 [0.0000; 0.0787]
## El - Tahan 2018              0.0000 [0.0000; 0.1089]
## Huang 2020                   0.1000 [0.0211; 0.2653]
## Wasem 2013                   0.0333 [0.0008; 0.1722]
## Yao 2015                     0.0000 [0.0000; 0.0740]
## Bhalla 2018                  0.0000 [0.0000; 0.2180]
## Bharti 2014                  0.0526 [0.0013; 0.2603]
## Bhat 2015                    0.0000 [0.0000; 0.0711]
## Bilehjani 2009               0.0000 [0.0000; 0.0925]
## Blajic 2019                  0.0000 [0.0000; 0.0606]
## Cakir 2020                   0.0000 [0.0000; 0.1122]
## Carlino 2009                 0.0000 [0.0000; 0.2180]
## Dey 2020                     0.1818 [0.1147; 0.2667]
## Erden 2010                   0.0000 [0.0000; 0.2059]
## Erturk 2015                  0.0000 [0.0000; 0.0881]
## Goksu 2016                   0.1333 [0.0658; 0.2316]
## Griesdale 2012               0.1000 [0.0123; 0.3170]
## Gunes 2020                   0.0000 [0.0000; 0.0402]
## Gupta 2020                   0.0000 [0.0000; 0.0881]
## Hirabayashi 2009             0.0938 [0.0610; 0.1363]
## Hirabayashi 2010             0.1000 [0.0490; 0.1762]
## Hsu 2012                     0.0667 [0.0082; 0.2207]
## Hu 2017                      0.0000 [0.0000; 0.0377]
## Kaur 2020                    0.0000 [0.0000; 0.0881]
## Kido 2015                    0.0400 [0.0010; 0.2035]
## Kim 2013                     0.0000 [0.0000; 0.1482]
## Kleine-Brueggeney 2017       0.0000 [0.0000; 0.0303]
## Koh 2010                     0.1600 [0.0454; 0.3608]
## Küçükosman 2020              0.0000 [0.0000; 0.1157]
## Laosuwan 2015                0.0000 [0.0000; 0.2849]
## Lascarrou 2017               0.0659 [0.0345; 0.1123]
## Lim 2005                     0.0000 [0.0000; 0.1157]
## Lin 2012                     0.0732 [0.0273; 0.1525]
## Liu 2019                     0.0552 [0.0268; 0.0993]
## Macke 2020                   0.0395 [0.0082; 0.1111]
## Maharaj 2006                 0.0333 [0.0008; 0.1722]
## Maharaj 2007                 0.0500 [0.0013; 0.2487]
## Maharaj 2008                 0.2000 [0.0573; 0.4366]
## Malik 2008                   0.0667 [0.0082; 0.2207]
## Malik1 2009                  0.1600 [0.0454; 0.3608]
## Malik2 2009                  0.0000 [0.0000; 0.1157]
## Mcelwain 2011                0.0645 [0.0079; 0.2142]
## Myunghun-Kim 2017            0.0000 [0.0000; 0.1684]
## Kulkarni 2013                0.0000 [0.0000; 0.1157]
## Inangil 2018                 0.0000 [0.0000; 0.1000]
## Ing 2017                     0.0000 [0.0000; 0.2059]
## Jafra 2018                   0.0000 [0.0000; 0.0362]
## Ndoko 2008                   0.0000 [0.0000; 0.0672]
## Nishiyama 2011               0.0571 [0.0070; 0.1916]
## Parasa 2016                  0.0000 [0.0000; 0.1157]
## Pazur 2016                   0.0000 [0.0000; 0.1323]
## Peirovifar 2014              0.4000 [0.1912; 0.6395]
## Pournajafian 2014            0.0000 [0.0000; 0.0725]
## Ranieri 2012                 0.0156 [0.0004; 0.0840]
## Reena 2019                   0.0000 [0.0000; 0.0711]
## Risse 2020                   0.0323 [0.0008; 0.1670]
## Ruetzeler 2020               0.0794 [0.0263; 0.1756]
## Sargin 2016                  0.0000 [0.0000; 0.0711]
## Shah 2016                    0.0690 [0.0085; 0.2277]
## Shukla 2017                  0.0750 [0.0157; 0.2039]
## Takenaka 2011                0.0000 [0.0000; 0.1028]
## Teoh 2010                    0.0000 [0.0000; 0.0362]
## Tsan 2020                    0.0000 [0.0000; 0.0521]
## Varsha 2019                  0.0000 [0.0000; 0.1000]
## Vijayakumar 2016             0.0000 [0.0000; 0.0787]
## Yoo 2018                     0.0000 [0.0000; 0.1544]
## El-Tahan 2017                0.0000 [0.0000; 0.2316]
## Lee 2012                     0.0400 [0.0010; 0.2035]
## Ferrando 2011                0.0000 [0.0000; 0.1157]
## Serocki 2013                 0.0000 [0.0000; 0.1089]
## Arora 2013                   0.0000 [0.0000; 0.0660]
## 
## Number of studies combined: k = 93
## 
##                      proportion           95%-CI
## Random effects model     0.0109 [0.0059; 0.0199]
## 
## Quantifying heterogeneity:
##  tau^2 = 3.0057; tau = 1.7337; I^2 = 84.3%; H = 2.52
## 
## Test of heterogeneity:
##       Q d.f.  p-value             Test
##   60.90   92   0.9949        Wald-type
##  321.42   92 < 0.0001 Likelihood-Ratio
## 
## Details on meta-analytical method:
## - Random intercept logistic regression model
## - Maximum-likelihood estimator for tau^2
## - Hartung-Knapp adjustment for random effects model
## - Logit transformation
## - Clopper-Pearson confidence interval for individual studies
## - Continuity correction of 0.5 in studies with zero cell frequencies
##   (only used to calculate individual study results)
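#Illustrative sketch only: absolute effect implied by combining the pooled
#baseline risk with Macintosh (GLMM model above) with the pooled RR from the
#random effects model for this outcome
p_mac<-0.0109               #pooled baseline risk with Macintosh (from above)
rr<-0.5267                  #pooled RR, random effects model (from above)
arr<-p_mac-p_mac*rr         #absolute risk reduction
nnt<-1/arr                  #number needed to treat
round(c(ARR=arr,NNT=nnt),4)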
#Forest plot for failed intubation within 2 attempts
meta::forest(mbin_fia.2_random,sortvar=TE,lab.e="Videolaryngoscopes",lab.c="Macintosh",col.study="black",col.square="black",col.diamond="blue")

#Detecting Outliers for failed intubation within 2 attempts
dmetar::find.outliers(mbin_fia.2_random)
## Identified outliers (random-effects model) 
## ------------------------------------------ 
## "Hirabayashi 2009", "Lee 2012" 
##  
## Results with outliers removed 
## ----------------------------- 
##                      RR             95%-CI %W(random) exclude
## Abdallah 2011    6.8614 [0.3638; 129.4185]        1.6        
## Abdelgalel 2018  0.1667 [0.0179;   1.5517]        2.4        
## Akbar 2015       0.2000 [0.0099;   4.0511]        1.6        
## Ali 2012         0.3333 [0.0371;   2.9910]        2.4        
## Ali 2017         0.3333 [0.0141;   7.8648]        1.5        
## Altun 2018       0.3496 [0.0607;   2.0132]        3.1        
## Andersen 2011    0.2000 [0.0098;   4.0624]        1.6        
## Aoi 2010         1.0000 [0.0676;  14.7865]        1.9        
## Arici 2014       5.0000 [0.2477; 100.9273]        1.6        
## Barak 2007       0.5625 [0.0520;   6.0868]        2.2        
## El - Tahan 2018  5.4433 [0.3229;  91.7505]        1.7        
## Huang 2020       1.5254 [0.4456;   5.2215]        4.2        
## Wasem 2013       1.0000 [0.0655;  15.2598]        1.8        
## Bhalla 2018      7.0000 [0.3936; 124.4923]        1.7        
## Bharti 2014      0.3171 [0.0137;   7.3255]        1.5        
## Dey 2020         0.1528 [0.0468;   0.4992]        4.3        
## Erden 2010       3.0000 [0.1315;  68.4178]        1.5        
## Goksu 2016       0.6000 [0.2297;   1.5673]        4.9        
## Griesdale 2012   1.5000 [0.2800;   8.0356]        3.3        
## Hirabayashi 2009 0.0404 [0.0055;   0.2965]        0.0       *
## Hirabayashi 2010 0.1000 [0.0130;   0.7666]        2.7        
## Hsu 2012         0.2000 [0.0100;   3.9955]        1.6        
## Kido 2015        0.3333 [0.0142;   7.8035]        1.5        
## Koh 2010         0.2500 [0.0300;   2.0828]        2.5        
## Lascarrou 2017   0.9117 [0.4129;   2.0127]        5.3        
## Lin 2012         0.1647 [0.0203;   1.3378]        2.6        
## Liu 2019         0.0481 [0.0028;   0.8155]        1.7        
## Macke 2020       0.1429 [0.0075;   2.7190]        1.6        
## Maharaj 2006     0.3333 [0.0141;   7.8648]        1.5        
## Maharaj 2007     0.3333 [0.0144;   7.7130]        1.5        
## Maharaj 2008     0.1111 [0.0064;   1.9341]        1.7        
## Malik 2008       0.3333 [0.0491;   2.2644]        2.9        
## Malik1 2009      0.0561 [0.0031;   1.0022]        1.7        
## Mcelwain 2011    0.2672 [0.0252;   2.8318]        2.2        
## Nishiyama 2011   1.8160 [0.4229;   7.7993]        3.7        
## Parasa 2016      2.9048 [0.1230;  68.5796]        1.5        
## Peirovifar 2014  0.5000 [0.1789;   1.3975]        4.7        
## Ranieri 2012     0.3139 [0.0130;   7.5664]        1.5        
## Risse 2020       2.7353 [0.3000;  24.9395]        2.4        
## Ruetzeler 2020   0.3818 [0.0769;   1.8970]        3.4        
## Shah 2016        0.4833 [0.0463;   5.0456]        2.2        
## Shukla 2017      0.1429 [0.0076;   2.6781]        1.6        
## Teoh 2010        1.0033 [0.0412;  24.4334]        1.4        
## Lee 2012         9.0000 [1.2883;  62.8753]        0.0       *
## Serocki 2013     2.5591 [0.1265;  51.7504]        1.6        
## 
## Number of studies combined: k = 43
## 
##                          RR           95%-CI     t p-value
## Random effects model 0.5237 [0.3639; 0.7536] -3.59  0.0009
## Prediction interval         [0.0847; 3.2364]              
## 
## Quantifying heterogeneity:
##  tau^2 = 0.7808 [0.0000; 0.9110]; tau = 0.8837 [0.0000; 0.9545];
##  I^2 = 8.2% [0.0%; 36.5%]; H = 1.04 [1.00; 1.26]
## 
## Test of heterogeneity:
##      Q d.f. p-value
##  45.77   42  0.3184
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
#Influence Analysis for failed intubation within 2 attempts
inf_analysis_fia.2<-dmetar::InfluenceAnalysis(mbin_fia.2_random,random = TRUE)
## [===========================================================================] DONE
plot(inf_analysis_fia.2,"baujat")
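#Further diagnostics from the same influence analysis; a sketch using the
#plot types documented in dmetar
plot(inf_analysis_fia.2,"influence")
plot(inf_analysis_fia.2,"es")
plot(inf_analysis_fia.2,"i2")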

#Meta-regression for failed intubation within 2 attempts

#Controlling for population characteristics (general, obese, neck immobilization, pregnant women, elderly)
meta::metareg(mbin_fia.2_random,~population)
## Warning in rma.uni(yi = TE[!exclude], sei = seTE[!exclude], data = dataset, :
## Redundant predictors dropped from the model.
## 
## Mixed-Effects Model (k = 45; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.9936 (SE = 0.3054)
## tau (square root of estimated tau^2 value):             0.9968
## I^2 (residual heterogeneity / unaccounted variability): 49.78%
## H^2 (unaccounted variability / sampling variability):   1.99
## R^2 (amount of heterogeneity accounted for):            2.30%
## 
## Test for Residual Heterogeneity:
## QE(df = 41) = 56.5761, p-val = 0.0535
## 
## Test of Moderators (coefficients 2:4):
## F(df1 = 3, df2 = 41) = 1.2265, p-val = 0.3123
## 
## Model Results:
## 
##                                estimate      se     tval    pval    ci.lb 
## intrcpt                          1.6094  1.5590   1.0324  0.3080  -1.5390 
## populationGeneral               -2.2744  1.5755  -1.4436  0.1565  -5.4562 
## populationNeck Immobilization   -2.7111  1.6382  -1.6549  0.1056  -6.0196 
## populationObese                 -1.7016  1.6834  -1.0108  0.3181  -5.1013 
##                                 ci.ub 
## intrcpt                        4.7579    
## populationGeneral              0.9075    
## populationNeck Immobilization  0.5973    
## populationObese                1.6982    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for operator experience
meta::metareg(mbin_fia.2_random,~experience)
## 
## Mixed-Effects Model (k = 45; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     1.0549 (SE = 0.3100)
## tau (square root of estimated tau^2 value):             1.0271
## I^2 (residual heterogeneity / unaccounted variability): 50.11%
## H^2 (unaccounted variability / sampling variability):   2.00
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 42) = 59.6370, p-val = 0.0378
## 
## Test of Moderators (coefficients 2:3):
## F(df1 = 2, df2 = 42) = 0.1705, p-val = 0.8438
## 
## Model Results:
## 
##                            estimate      se     tval    pval    ci.lb   ci.ub 
## intrcpt                     -0.7657  0.8527  -0.8979  0.3743  -2.4865  0.9552 
## experienceExperienced        0.0529  0.8885   0.0596  0.9528  -1.7401  1.8460 
## experienceNon-experienced    0.3055  0.9301   0.3285  0.7442  -1.5716  2.1827 
##  
## intrcpt 
## experienceExperienced 
## experienceNon-experienced 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for intubation technique (regular, rapid sequence induction)
meta::metareg(mbin_fia.2_random,~technique)
## 
## Mixed-Effects Model (k = 45; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     1.0399 (SE = 0.3132)
## tau (square root of estimated tau^2 value):             1.0197
## I^2 (residual heterogeneity / unaccounted variability): 47.66%
## H^2 (unaccounted variability / sampling variability):   1.91
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 41) = 58.0718, p-val = 0.0405
## 
## Test of Moderators (coefficients 2:4):
## F(df1 = 3, df2 = 41) = 0.6958, p-val = 0.5599
## 
## Model Results:
## 
##                                            estimate      se     tval    pval 
## intrcpt                                     -0.9848  0.4258  -2.3125  0.0258 
## techniqueRapid Sequence Induction           -0.1759  0.7718  -0.2279  0.8209 
## techniqueRegular                             0.5505  0.4922   1.1186  0.2698 
## techniqueRegular/Rapid Sequence Induction    0.0219  1.2019   0.0183  0.9855 
##                                              ci.lb    ci.ub 
## intrcpt                                    -1.8447  -0.1248  * 
## techniqueRapid Sequence Induction          -1.7345   1.3827    
## techniqueRegular                           -0.4434   1.5445    
## techniqueRegular/Rapid Sequence Induction  -2.4054   2.4493    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for setting (operating room, out of hospital, etc.)
meta::metareg(mbin_fia.2_random,~set)
## 
## Mixed-Effects Model (k = 45; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     1.0629 (SE = 0.3209)
## tau (square root of estimated tau^2 value):             1.0310
## I^2 (residual heterogeneity / unaccounted variability): 47.14%
## H^2 (unaccounted variability / sampling variability):   1.89
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 40) = 57.8794, p-val = 0.0334
## 
## Test of Moderators (coefficients 2:5):
## F(df1 = 4, df2 = 40) = 0.5286, p-val = 0.7153
## 
## Model Results:
## 
##                     estimate      se     tval    pval    ci.lb   ci.ub 
## intrcpt              -0.5108  0.9881  -0.5170  0.6080  -2.5078  1.4861    
## setICU               -0.5979  1.1673  -0.5123  0.6113  -2.9571  1.7612    
## setMultiple           0.9163  1.5239   0.6013  0.5510  -2.1636  3.9962    
## setOperating Room    -0.0893  1.0133  -0.0881  0.9302  -2.1373  1.9587    
## setOut of Hospital   -1.4351  1.8617  -0.7709  0.4453  -5.1977  2.3275    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for nature of the procedure (elective, urgent)
meta::metareg(mbin_fia.2_random,~nature)
## 
## Mixed-Effects Model (k = 45; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.9435 (SE = 0.2976)
## tau (square root of estimated tau^2 value):             0.9713
## I^2 (residual heterogeneity / unaccounted variability): 46.52%
## H^2 (unaccounted variability / sampling variability):   1.87
## R^2 (amount of heterogeneity accounted for):            7.22%
## 
## Test for Residual Heterogeneity:
## QE(df = 42) = 51.7781, p-val = 0.1434
## 
## Test of Moderators (coefficients 2:3):
## F(df1 = 2, df2 = 42) = 2.9068, p-val = 0.0657
## 
## Model Results:
## 
##                 estimate      se     tval    pval    ci.lb    ci.ub 
## intrcpt          -2.2409  0.6905  -3.2454  0.0023  -3.6343  -0.8474  ** 
## natureElective    1.7364  0.7234   2.4004  0.0209   0.2766   3.1962   * 
## natureUrgent      1.6993  0.8368   2.0306  0.0487   0.0105   3.3881   * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for inducer used
meta::metareg(mbin_fia.2_random,~inducer)
## Warning in rma.uni(yi = TE[!exclude], sei = seTE[!exclude], data = dataset, :
## Redundant predictors dropped from the model.
## 
## Mixed-Effects Model (k = 45; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.9726 (SE = 0.3115)
## tau (square root of estimated tau^2 value):             0.9862
## I^2 (residual heterogeneity / unaccounted variability): 48.11%
## H^2 (unaccounted variability / sampling variability):   1.93
## R^2 (amount of heterogeneity accounted for):            4.37%
## 
## Test for Residual Heterogeneity:
## QE(df = 39) = 52.0048, p-val = 0.0795
## 
## Test of Moderators (coefficients 2:6):
## F(df1 = 5, df2 = 39) = 1.4008, p-val = 0.2454
## 
## Model Results:
## 
##                            estimate      se     tval    pval    ci.lb    ci.ub 
## intrcpt                     -0.9596  0.3739  -2.5663  0.0142  -1.7158  -0.2033 
## inducerEtomidate            -2.0739  1.5220  -1.3626  0.1808  -5.1525   1.0047 
## inducerPropofol              0.4624  0.4492   1.0294  0.3096  -0.4462   1.3710 
## inducerPropofol/Ketamine    -0.8322  1.3248  -0.6282  0.5336  -3.5119   1.8475 
## inducerPropofol/Midazolam    1.6967  0.9493   1.7873  0.0817  -0.2235   3.6169 
## inducerThiopental            0.2966  0.9592   0.3092  0.7588  -1.6436   2.2369 
##  
## intrcpt                    * 
## inducerEtomidate 
## inducerPropofol 
## inducerPropofol/Ketamine 
## inducerPropofol/Midazolam  . 
## inducerThiopental 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for opioid used
meta::metareg(mbin_fia.2_random,~opioid)
## Warning in rma.uni(yi = TE[!exclude], sei = seTE[!exclude], data = dataset, :
## Redundant predictors dropped from the model.
## 
## Mixed-Effects Model (k = 45; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     1.0839 (SE = 0.3171)
## tau (square root of estimated tau^2 value):             1.0411
## I^2 (residual heterogeneity / unaccounted variability): 50.96%
## H^2 (unaccounted variability / sampling variability):   2.04
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 41) = 60.1829, p-val = 0.0269
## 
## Test of Moderators (coefficients 2:4):
## F(df1 = 3, df2 = 41) = 0.0771, p-val = 0.9720
## 
## Model Results:
## 
##                     estimate      se     tval    pval    ci.lb   ci.ub 
## intrcpt              -0.7258  0.3750  -1.9356  0.0598  -1.4831  0.0315  . 
## opioidFentanyl        0.0882  0.4617   0.1910  0.8494  -0.8442  1.0206    
## opioidRemifentanil    0.3712  0.7816   0.4750  0.6373  -1.2073  1.9497    
## opioidSulfentanil     0.0093  1.0798   0.0086  0.9932  -2.1713  2.1899    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for neuromuscular blocking agent used
meta::metareg(mbin_fia.2_random,~blocker)
## Warning in rma.uni(yi = TE[!exclude], sei = seTE[!exclude], data = dataset, :
## Redundant predictors dropped from the model.
## 
## Mixed-Effects Model (k = 45; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.9799 (SE = 0.3126)
## tau (square root of estimated tau^2 value):             0.9899
## I^2 (residual heterogeneity / unaccounted variability): 47.59%
## H^2 (unaccounted variability / sampling variability):   1.91
## R^2 (amount of heterogeneity accounted for):            3.65%
## 
## Test for Residual Heterogeneity:
## QE(df = 39) = 51.8592, p-val = 0.0815
## 
## Test of Moderators (coefficients 2:6):
## F(df1 = 5, df2 = 39) = 1.3555, p-val = 0.2620
## 
## Model Results:
## 
##                         estimate      se     tval    pval    ci.lb    ci.ub 
## intrcpt                  -0.9869  0.3608  -2.7355  0.0093  -1.7166  -0.2571  ** 
## blockerAtracurium        -0.1556  0.5939  -0.2619  0.7948  -1.3569   1.0458     
## blockerCisatracurium     -0.8170  1.2823  -0.6372  0.5277  -3.4107   1.7767     
## blockerRocuronium         0.7172  0.4946   1.4502  0.1550  -0.2831   1.7176     
## blockerSuccinylcholine    0.2850  0.7122   0.4002  0.6912  -1.1556   1.7256     
## blockerVecuronium         1.3524  0.7188   1.8815  0.0674  -0.1015   2.8063   . 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
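#The univariable meta-regressions above can also be run in a single loop;
#a compact sketch, assuming the moderator columns of fia.2
mods<-c("population","experience","technique","set","nature","inducer","opioid","blocker")
reg_list<-lapply(mods,function(m) meta::metareg(mbin_fia.2_random,as.formula(paste("~",m))))
names(reg_list)<-mods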
#Multiple Meta-regression for failed intubation within 2 attempts
str(fia.2)
## 'data.frame':    93 obs. of  18 variables:
##  $ id          : int  1 2 3 4 6 7 8 11 14 15 ...
##  $ author      : Factor w/ 86 levels "Abdallah","Abdelgalel",..: 1 1 2 3 4 5 5 6 7 8 ...
##  $ year        : int  2011 2019 2018 2015 2020 2016 2015 2015 2016 2012 ...
##  $ population  : Factor w/ 5 levels "Elderly","General",..: 4 2 2 2 3 3 2 3 2 2 ...
##  $ predicted   : Factor w/ 3 levels "","Difficult",..: 2 3 2 3 2 2 3 2 3 2 ...
##  $ set         : Factor w/ 5 levels "Emergency department",..: 4 4 2 4 4 4 4 4 4 4 ...
##  $ nature      : Factor w/ 3 levels "","Elective",..: 2 2 3 2 2 2 2 2 2 1 ...
##  $ technique   : Factor w/ 4 levels "","Rapid Sequence Induction",..: 1 3 2 3 3 1 3 3 3 1 ...
##  $ experience  : Factor w/ 3 levels "","Experienced",..: 3 2 2 2 2 2 2 2 3 1 ...
##  $ intervention: Factor w/ 1 level "videolaryngoscope": 1 1 1 1 1 1 1 1 1 1 ...
##  $ reference   : Factor w/ 1 level "Macintosh": 1 1 1 1 1 1 1 1 1 1 ...
##  $ inducer     : Factor w/ 8 levels "","Etomidate",..: 1 4 5 4 4 1 4 4 4 1 ...
##  $ opioid      : Factor w/ 5 levels "","Fentanyl",..: 1 2 2 2 2 1 1 2 4 1 ...
##  $ blocker     : Factor w/ 7 levels "","Atracurium",..: 1 3 4 4 7 1 4 4 4 1 ...
##  $ ftent.e1    : int  3 0 1 0 0 0 0 0 0 1 ...
##  $ ftent.t1    : int  50 35 80 40 40 78 25 45 64 25 ...
##  $ ftent.e2    : int  0 0 3 0 0 0 0 2 0 3 ...
##  $ ftent.t2    : int  49 35 40 20 40 96 25 45 22 25 ...
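#Note: the blank "" level in several factors above (nature, technique,
#experience, inducer, opioid, blocker) acts as the reference category in the
#model below; a hypothetical sketch if blanks were instead treated as missing
fia.2_na<-fia.2                 #hypothetical copy, not used in the model below
fia.2_na[fia.2_na==""]<-NA      #recode blank factor levels to NA
fia.2_na<-droplevels(fia.2_na)  #drop the now-empty "" levels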
model_fia.2<-metafor::rma.uni(ai=ftent.e1,n1i = ftent.t1,ci=ftent.e2,n2i = ftent.t2,data = fia.2,method = "ML", measure = "RR", mods = ~experience+technique+population+set+nature+inducer+opioid+blocker,test = "knha")
model_fia.2
## 
## Mixed-Effects Model (k = 93; tau^2 estimator: ML)
## 
## tau^2 (estimated amount of residual heterogeneity):     0 (SE = 0.1366)
## tau (square root of estimated tau^2 value):             0
## I^2 (residual heterogeneity / unaccounted variability): 0.00%
## H^2 (unaccounted variability / sampling variability):   1.00
## R^2 (amount of heterogeneity accounted for):            100.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 60) = 22.7029, p-val = 1.0000
## 
## Test of Moderators (coefficients 2:33):
## F(df1 = 32, df2 = 60) = 3.3735, p-val < .0001
## 
## Model Results:
## 
##                                            estimate      se     tval    pval 
## intrcpt                                     -4.5728  2.3401  -1.9541  0.0554 
## experienceExperienced                       -0.9696  0.4805  -2.0180  0.0481 
## experienceNon-experienced                   -0.5228  0.4882  -1.0708  0.2885 
## techniqueRapid Sequence Induction           -0.3399  1.4925  -0.2277  0.8206 
## techniqueRegular                             1.2339  1.0700   1.1532  0.2534 
## techniqueRegular/Rapid Sequence Induction   -0.5985  1.3512  -0.4430  0.6594 
## populationGeneral                            0.0412  1.2398   0.0332  0.9736 
## populationNeck Immobilization               -0.4953  1.2696  -0.3901  0.6978 
## populationObese                              0.8696  1.3703   0.6346  0.5281 
## populationPregnant Women                     2.7639  1.5332   1.8028  0.0764 
## setICU                                       0.0785  1.5428   0.0509  0.9596 
## setMultiple                                  0.5764  1.6112   0.3578  0.7218 
## setOperating Room                            2.6595  1.8690   1.4230  0.1599 
## setOut of Hospital                          -1.9578  1.0882  -1.7992  0.0770 
## natureElective                               3.2542  0.7493   4.3428  <.0001 
## natureUrgent                                 4.8834  0.9913   4.9262  <.0001 
## inducerEtomidate                            -5.9201  1.7629  -3.3581  0.0014 
## inducerMidazolam                            -2.6264  1.6202  -1.6211  0.1102 
## inducerPropofol                             -2.4341  1.4381  -1.6926  0.0957 
## inducerPropofol/Ketamine                    -1.7416  1.8168  -0.9586  0.3416 
## inducerPropofol/Midazolam                   -2.0990  1.5688  -1.3380  0.1860 
## inducerPropofol/Thipental                   -2.6998  1.9373  -1.3936  0.1686 
## inducerThiopental                           -3.0999  1.5346  -2.0201  0.0478 
## opioidFentanyl                               0.2899  0.4474   0.6479  0.5195 
## opioidMorphine                               0.0242  1.3711   0.0177  0.9860 
## opioidRemifentanil                          -0.0629  0.5445  -0.1156  0.9084 
## opioidSulfentanil                            0.7012  0.6657   1.0533  0.2964 
## blockerAtracurium                           -0.3214  0.6717  -0.4786  0.6340 
## blockerCisatracurium                        -0.2516  0.7322  -0.3436  0.7324 
## blockerRocuronium                            0.5392  0.6463   0.8342  0.4075 
## blockerRocuronium/Vecuronium                 0.0245  1.4022   0.0175  0.9861 
## blockerSuccinylcholine                      -0.0039  0.7330  -0.0054  0.9957 
## blockerVecuronium                            0.7636  0.7146   1.0687  0.2895 
##                                              ci.lb    ci.ub 
## intrcpt                                    -9.2538   0.1081    . 
## experienceExperienced                      -1.9307  -0.0085    * 
## experienceNon-experienced                  -1.4993   0.4537      
## techniqueRapid Sequence Induction          -3.3253   2.6456      
## techniqueRegular                           -0.9064   3.3741      
## techniqueRegular/Rapid Sequence Induction  -3.3013   2.1043      
## populationGeneral                          -2.4387   2.5211      
## populationNeck Immobilization              -3.0348   2.0442      
## populationObese                            -1.8714   3.6105      
## populationPregnant Women                   -0.3028   5.8307    . 
## setICU                                     -3.0076   3.1645      
## setMultiple                                -2.6464   3.7992      
## setOperating Room                          -1.0790   6.3980      
## setOut of Hospital                         -4.1346   0.2189    . 
## natureElective                              1.7553   4.7530  *** 
## natureUrgent                                2.9005   6.8664  *** 
## inducerEtomidate                           -9.4465  -2.3937   ** 
## inducerMidazolam                           -5.8672   0.6144      
## inducerPropofol                            -5.3108   0.4425    . 
## inducerPropofol/Ketamine                   -5.3757   1.8926      
## inducerPropofol/Midazolam                  -5.2371   1.0390      
## inducerPropofol/Thipental                  -6.5749   1.1753      
## inducerThiopental                          -6.1695  -0.0303    * 
## opioidFentanyl                             -0.6051   1.1848      
## opioidMorphine                             -2.7183   2.7668      
## opioidRemifentanil                         -1.1520   1.0262      
## opioidSulfentanil                          -0.6305   2.0329      
## blockerAtracurium                          -1.6650   1.0221      
## blockerCisatracurium                       -1.7161   1.2130      
## blockerRocuronium                          -0.7537   1.8320      
## blockerRocuronium/Vecuronium               -2.7804   2.8294      
## blockerSuccinylcholine                     -1.4702   1.4623      
## blockerVecuronium                          -0.6657   2.1930      
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
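#Permutation test of the multiple meta-regression above, as a safeguard
#against spurious moderator findings (computationally intensive)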
metafor::permutest(model_fia.2)
## Running 1000 iterations for approximate permutation test.
## 
  |=====================================                                 |  54%
  |                                                                            
  |======================================                                |  54%
  |                                                                            
  |======================================                                |  55%
  |                                                                            
  |=======================================                               |  55%
  |                                                                            
  |=======================================                               |  56%
  |                                                                            
  |========================================                              |  56%
  |                                                                            
  |========================================                              |  57%
  |                                                                            
  |========================================                              |  58%
  |                                                                            
  |=========================================                             |  58%
  |                                                                            
  |=========================================                             |  59%
  |                                                                            
  |==========================================                            |  59%
  |                                                                            
  |==========================================                            |  60%
  |                                                                            
  |==========================================                            |  61%
  |                                                                            
  |===========================================                           |  61%
  |                                                                            
  |===========================================                           |  62%
  |                                                                            
  |============================================                          |  62%
  |                                                                            
  |============================================                          |  63%
  |                                                                            
  |============================================                          |  64%
  |                                                                            
  |=============================================                         |  64%
  |                                                                            
  |=============================================                         |  65%
  |                                                                            
  |==============================================                        |  65%
  |                                                                            
  |==============================================                        |  66%
  |                                                                            
  |===============================================                       |  66%
  |                                                                            
  |===============================================                       |  67%
  |                                                                            
  |===============================================                       |  68%
  |                                                                            
  |================================================                      |  68%
  |                                                                            
  |================================================                      |  69%
  |                                                                            
  |=================================================                     |  69%
  |                                                                            
  |=================================================                     |  70%
  |                                                                            
  |=================================================                     |  71%
  |                                                                            
  |==================================================                    |  71%
  |                                                                            
  |==================================================                    |  72%
  |                                                                            
  |===================================================                   |  72%
  |                                                                            
  |===================================================                   |  73%
  |                                                                            
  |===================================================                   |  74%
  |                                                                            
  |====================================================                  |  74%
  |                                                                            
  |====================================================                  |  75%
  |                                                                            
  |=====================================================                 |  75%
  |                                                                            
  |=====================================================                 |  76%
  |                                                                            
  |======================================================                |  76%
  |                                                                            
  |======================================================                |  77%
  |                                                                            
  |======================================================                |  78%
  |                                                                            
  |=======================================================               |  78%
  |                                                                            
  |=======================================================               |  79%
  |                                                                            
  |========================================================              |  79%
  |                                                                            
  |========================================================              |  80%
  |                                                                            
  |========================================================              |  81%
  |                                                                            
  |=========================================================             |  81%
  |                                                                            
  |=========================================================             |  82%
  |                                                                            
  |==========================================================            |  82%
  |                                                                            
  |==========================================================            |  83%
  |                                                                            
  |==========================================================            |  84%
  |                                                                            
  |===========================================================           |  84%
  |                                                                            
  |===========================================================           |  85%
  |                                                                            
  |============================================================          |  85%
  |                                                                            
  |============================================================          |  86%
  |                                                                            
  |=============================================================         |  86%
  |                                                                            
  |=============================================================         |  87%
  |                                                                            
  |=============================================================         |  88%
  |                                                                            
  |==============================================================        |  88%
  |                                                                            
  |==============================================================        |  89%
  |                                                                            
  |===============================================================       |  89%
  |                                                                            
  |===============================================================       |  90%
  |                                                                            
  |===============================================================       |  91%
  |                                                                            
  |================================================================      |  91%
  |                                                                            
  |================================================================      |  92%
  |                                                                            
  |=================================================================     |  92%
  |                                                                            
  |=================================================================     |  93%
  |                                                                            
  |=================================================================     |  94%
  |                                                                            
  |==================================================================    |  94%
  |                                                                            
  |==================================================================    |  95%
  |                                                                            
  |===================================================================   |  95%
  |                                                                            
  |===================================================================   |  96%
  |                                                                            
  |====================================================================  |  96%
  |                                                                            
  |====================================================================  |  97%
  |                                                                            
  |====================================================================  |  98%
  |                                                                            
  |===================================================================== |  98%
  |                                                                            
  |===================================================================== |  99%
  |                                                                            
  |======================================================================|  99%
  |                                                                            
  |======================================================================| 100%
## 
## Test of Moderators (coefficients 2:33):
## F(df1 = 32, df2 = 60) = 3.3735, p-val* = 0.0090
## 
## Model Results:
## 
##                                            estimate      se     tval   pval* 
## intrcpt                                     -4.5728  2.3401  -1.9541  0.0590 
## experienceExperienced                       -0.9696  0.4805  -2.0180  0.0990 
## experienceNon-experienced                   -0.5228  0.4882  -1.0708  0.3790 
## techniqueRapid Sequence Induction           -0.3399  1.4925  -0.2277  0.7930 
## techniqueRegular                             1.2339  1.0700   1.1532  0.2320 
## techniqueRegular/Rapid Sequence Induction   -0.5985  1.3512  -0.4430  0.5920 
## populationGeneral                            0.0412  1.2398   0.0332  0.9730 
## populationNeck Immobilization               -0.4953  1.2696  -0.3901  0.6020 
## populationObese                              0.8696  1.3703   0.6346  0.4520 
## populationPregnant Women                     2.7639  1.5332   1.8028  0.0840 
## setICU                                       0.0785  1.5428   0.0509  0.9630 
## setMultiple                                  0.5764  1.6112   0.3578  0.6650 
## setOperating Room                            2.6595  1.8690   1.4230  0.1480 
## setOut of Hospital                          -1.9578  1.0882  -1.7992  0.0680 
## natureElective                               3.2542  0.7493   4.3428  0.0010 
## natureUrgent                                 4.8834  0.9913   4.9262  0.0010 
## inducerEtomidate                            -5.9201  1.7629  -3.3581  0.0050 
## inducerMidazolam                            -2.6264  1.6202  -1.6211  0.1210 
## inducerPropofol                             -2.4341  1.4381  -1.6926  0.1160 
## inducerPropofol/Ketamine                    -1.7416  1.8168  -0.9586  0.2930 
## inducerPropofol/Midazolam                   -2.0990  1.5688  -1.3380  0.2020 
## inducerPropofol/Thipental                   -2.6998  1.9373  -1.3936  0.1520 
## inducerThiopental                           -3.0999  1.5346  -2.0201  0.0760 
## opioidFentanyl                               0.2899  0.4474   0.6479  0.5790 
## opioidMorphine                               0.0242  1.3711   0.0177  0.9860 
## opioidRemifentanil                          -0.0629  0.5445  -0.1156  0.9190 
## opioidSulfentanil                            0.7012  0.6657   1.0533  0.3580 
## blockerAtracurium                           -0.3214  0.6717  -0.4786  0.6670 
## blockerCisatracurium                        -0.2516  0.7322  -0.3436  0.7480 
## blockerRocuronium                            0.5392  0.6463   0.8342  0.4730 
## blockerRocuronium/Vecuronium                 0.0245  1.4022   0.0175  0.9850 
## blockerSuccinylcholine                      -0.0039  0.7330  -0.0054  0.9980 
## blockerVecuronium                            0.7636  0.7146   1.0687  0.3570 
##                                              ci.lb    ci.ub 
## intrcpt                                    -9.2538   0.1081    . 
## experienceExperienced                      -1.9307  -0.0085    . 
## experienceNon-experienced                  -1.4993   0.4537      
## techniqueRapid Sequence Induction          -3.3253   2.6456      
## techniqueRegular                           -0.9064   3.3741      
## techniqueRegular/Rapid Sequence Induction  -3.3013   2.1043      
## populationGeneral                          -2.4387   2.5211      
## populationNeck Immobilization              -3.0348   2.0442      
## populationObese                            -1.8714   3.6105      
## populationPregnant Women                   -0.3028   5.8307    . 
## setICU                                     -3.0076   3.1645      
## setMultiple                                -2.6464   3.7992      
## setOperating Room                          -1.0790   6.3980      
## setOut of Hospital                         -4.1346   0.2189    . 
## natureElective                              1.7553   4.7530  *** 
## natureUrgent                                2.9005   6.8664  *** 
## inducerEtomidate                           -9.4465  -2.3937   ** 
## inducerMidazolam                           -5.8672   0.6144      
## inducerPropofol                            -5.3108   0.4425      
## inducerPropofol/Ketamine                   -5.3757   1.8926      
## inducerPropofol/Midazolam                  -5.2371   1.0390      
## inducerPropofol/Thipental                  -6.5749   1.1753      
## inducerThiopental                          -6.1695  -0.0303    . 
## opioidFentanyl                             -0.6051   1.1848      
## opioidMorphine                             -2.7183   2.7668      
## opioidRemifentanil                         -1.1520   1.0262      
## opioidSulfentanil                          -0.6305   2.0329      
## blockerAtracurium                          -1.6650   1.0221      
## blockerCisatracurium                       -1.7161   1.2130      
## blockerRocuronium                          -0.7537   1.8320      
## blockerRocuronium/Vecuronium               -2.7804   2.8294      
## blockerSuccinylcholine                     -1.4702   1.4623      
## blockerVecuronium                          -0.6657   2.1930      
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Reduced meta-regression model for failed intubation within 2 attempts, keeping the moderators flagged as significant above (nature of procedure and inducer)
model_fia.2.2<-metafor::rma.uni(ai=ftent.e1,n1i = ftent.t1,ci=ftent.e2,n2i = ftent.t2,data = fia.2,method = "ML", measure = "RR", mods = ~nature+inducer,test = "knha")
model_fia.2.2
## 
## Mixed-Effects Model (k = 93; tau^2 estimator: ML)
## 
## tau^2 (estimated amount of residual heterogeneity):     0 (SE = 0.1366)
## tau (square root of estimated tau^2 value):             0
## I^2 (residual heterogeneity / unaccounted variability): 0.00%
## H^2 (unaccounted variability / sampling variability):   1.00
## R^2 (amount of heterogeneity accounted for):            100.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 83) = 44.6274, p-val = 0.9998
## 
## Test of Moderators (coefficients 2:10):
## F(df1 = 9, df2 = 83) = 3.9104, p-val = 0.0004
## 
## Model Results:
## 
##                            estimate      se     tval    pval    ci.lb    ci.ub 
## intrcpt                     -2.2083  0.4288  -5.1505  <.0001  -3.0611  -1.3555 
## natureElective               1.0136  0.5457   1.8576  0.0668  -0.0717   2.0990 
## natureUrgent                 1.9601  0.4778   4.1024  <.0001   1.0098   2.9104 
## inducerEtomidate            -1.8388  1.1215  -1.6395  0.1049  -4.0695   0.3919 
## inducerMidazolam             1.1780  0.9190   1.2819  0.2034  -0.6498   3.0058 
## inducerPropofol              0.8414  0.3895   2.1600  0.0337   0.0666   1.6162 
## inducerPropofol/Ketamine    -1.5435  0.8609  -1.7929  0.0766  -3.2559   0.1688 
## inducerPropofol/Midazolam    1.8737  0.6184   3.0299  0.0033   0.6437   3.1037 
## inducerPropofol/Thipental    1.1947  1.5011   0.7959  0.4284  -1.7909   4.1803 
## inducerThiopental            0.7802  0.6316   1.2353  0.2202  -0.4760   2.0365 
##  
## intrcpt                    *** 
## natureElective               . 
## natureUrgent               *** 
## inducerEtomidate 
## inducerMidazolam 
## inducerPropofol              * 
## inducerPropofol/Ketamine     . 
## inducerPropofol/Midazolam   ** 
## inducerPropofol/Thipental 
## inducerThiopental 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
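#A minimal sketch, not part of the original analysis: back-transform the reduced model's log risk ratios to the RR scale for one illustrative moderator combination (urgent procedure with propofol induction); the newmods vector follows the coefficient order printed above
predict(model_fia.2.2, newmods = c(0, 1, 0, 0, 1, 0, 0, 0, 0), transf = exp)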
#Funnel Plot for failed intubation within 2 attempts
meta::funnel(mbin_fia.2_random,ref.triangle=TRUE,contour.levels=c(0.9,0.95,0.99),col.contour=c("darkblue","blue","lightblue"))
meta::metabias(mbin_fia.2_random, method.bias = "linreg")
## 
##  Linear regression test of funnel plot asymmetry
## 
## data:  mbin_fia.2_random
## t = -0.63476, df = 43, p-value = 0.5289
## alternative hypothesis: asymmetry in funnel plot
## sample estimates:
##       bias    se.bias  intercept 
## -0.2700241  0.4253953 -0.3326992
dmetar::eggers.test(mbin_fia.2_random)
##              Intercept ConfidenceInterval      t       p
## Egger's test     -0.27       -1.054-0.514 -0.635 0.52895
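#An optional sensitivity check, sketched here as an addition (not in the original analysis): trim-and-fill to gauge whether imputing potentially missing studies would shift the pooled estimate, given that neither asymmetry test above was significant
tf_fia.2<-meta::trimfill(mbin_fia.2_random)
summary(tf_fia.2)
meta::funnel(tf_fia.2)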

Percentage of Glottic Opening (POGO)

pogo<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Pairwise VLs POGO.csv")

#Number of comparisons and patients meta-analyzed for POGO
length(pogo$mpogo.1)
## [1] 20
sum(pogo$fpogo.1,pogo$fpogo.2)
## [1] 1870
#Meta-analysis for POGO
mcont_pogo<-meta::metacont(fpogo.1,mpogo.1,sdpogo.1,fpogo.2,mpogo.2,sdpogo.2,data=pogo,studlab = paste(author,year),comb.fixed = FALSE,comb.random = TRUE,prediction = TRUE,sm="SMD")
mcont_pogo
##                     SMD             95%-CI %W(random)
## Abdallah 2019    2.5911 [ 1.9471;  3.2350]        5.0
## Akbarzadeh 2017 -0.2385 [-0.7156;  0.2387]        5.3
## Ali 2012        12.3543 [ 9.7713; 14.9373]        1.6
## Aqil 2016        1.1848 [ 0.7079;  1.6618]        5.3
## Aqil 2017        1.1739 [ 0.8144;  1.5333]        5.5
## Arici 2014       0.7374 [ 0.2837;  1.1910]        5.3
## Choi 2011        0.9662 [ 0.4295;  1.5030]        5.2
## Dey 2020         1.4882 [ 1.1876;  1.7888]        5.6
## Dhonneur 2008    0.9683 [ 0.6835;  1.2532]        5.6
## Khan 2008        0.7597 [ 0.1458;  1.3736]        5.0
## Kido 2015        0.9630 [ 0.3748;  1.5512]        5.1
## Koh 2010         3.6379 [ 2.7110;  4.5649]        4.3
## Jafra 2018      -0.3196 [-0.5986; -0.0406]        5.6
## Ruetzeler 2020   0.6690 [ 0.3140;  1.0241]        5.5
## Sargin 2016      0.8283 [ 0.4192;  1.2374]        5.4
## Shah 2016        0.8846 [ 0.3527;  1.4165]        5.2
## Tsan 2020        0.3062 [-0.0295;  0.6419]        5.5
## Yumul 2016       0.9978 [ 0.2460;  1.7496]        4.7
## Yumul 2016       1.2070 [ 0.4390;  1.9749]        4.7
## Yumul 2016       1.4607 [ 0.6940;  2.2275]        4.7
## 
## Number of studies combined: k = 20
## 
##                         SMD            95%-CI    z  p-value
## Random effects model 1.2120 [ 0.8246; 1.5994] 6.13 < 0.0001
## Prediction interval         [-0.5688; 2.9928]              
## 
## Quantifying heterogeneity:
##  tau^2 = 0.6794 [0.9781; 4.1082]; tau = 0.8243 [0.9890; 2.0269];
##  I^2 = 92.8% [90.3%; 94.7%]; H = 3.73 [3.20; 4.35]
## 
## Test of heterogeneity:
##       Q d.f.  p-value
##  264.44   19 < 0.0001
## 
## Details on meta-analytical method:
## - Inverse variance method
## - DerSimonian-Laird estimator for tau^2
## - Jackson method for confidence interval of tau^2 and tau
## - Hedges' g (bias corrected standardised mean difference)
#Forest plot for POGO
meta::forest(mcont_pogo,sortvar=TE,lab.e="Videolaryngoscopes",lab.c="Macintosh",col.study="black",col.square="black",col.diamond="blue")

#Detecting Outliers for POGO
dmetar::find.outliers(mcont_pogo)
## Identified outliers (random-effects model) 
## ------------------------------------------ 
## "Abdallah 2019", "Akbarzadeh 2017", "Ali 2012", "Koh 2010", "Jafra 2018", "Tsan 2020" 
##  
## Results with outliers removed 
## ----------------------------- 
##                     SMD             95%-CI %W(random) exclude
## Abdallah 2019    2.5911 [ 1.9471;  3.2350]        0.0       *
## Akbarzadeh 2017 -0.2385 [-0.7156;  0.2387]        0.0       *
## Ali 2012        12.3543 [ 9.7713; 14.9373]        0.0       *
## Aqil 2016        1.1848 [ 0.7079;  1.6618]        7.0        
## Aqil 2017        1.1739 [ 0.8144;  1.5333]       10.0        
## Arici 2014       0.7374 [ 0.2837;  1.1910]        7.5        
## Choi 2011        0.9662 [ 0.4295;  1.5030]        5.9        
## Dey 2020         1.4882 [ 1.1876;  1.7888]       12.1        
## Dhonneur 2008    0.9683 [ 0.6835;  1.2532]       12.8        
## Khan 2008        0.7597 [ 0.1458;  1.3736]        4.8        
## Kido 2015        0.9630 [ 0.3748;  1.5512]        5.1        
## Koh 2010         3.6379 [ 2.7110;  4.5649]        0.0       *
## Jafra 2018      -0.3196 [-0.5986; -0.0406]        0.0       *
## Ruetzeler 2020   0.6690 [ 0.3140;  1.0241]       10.2        
## Sargin 2016      0.8283 [ 0.4192;  1.2374]        8.6        
## Shah 2016        0.8846 [ 0.3527;  1.4165]        6.0        
## Tsan 2020        0.3062 [-0.0295;  0.6419]        0.0       *
## Yumul 2016       0.9978 [ 0.2460;  1.7496]        3.4        
## Yumul 2016       1.2070 [ 0.4390;  1.9749]        3.3        
## Yumul 2016       1.4607 [ 0.6940;  2.2275]        3.3        
## 
## Number of studies combined: k = 14
## 
##                         SMD           95%-CI     z  p-value
## Random effects model 1.0172 [0.8669; 1.1675] 13.27 < 0.0001
## Prediction interval         [0.6351; 1.3993]               
## 
## Quantifying heterogeneity:
##  tau^2 = 0.0249 [0.0000; 0.1177]; tau = 0.1577 [0.0000; 0.3431];
##  I^2 = 32.2% [0.0%; 64.2%]; H = 1.21 [1.00; 1.67]
## 
## Test of heterogeneity:
##      Q d.f. p-value
##  19.19   13  0.1174
## 
## Details on meta-analytical method:
## - Inverse variance method
## - DerSimonian-Laird estimator for tau^2
## - Jackson method for confidence interval of tau^2 and tau
## - Hedges' g (bias corrected standardised mean difference)
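#A complementary diagnostic, sketched here as an addition (not in the original analysis): leave-one-out influence measures to check whether the flagged outliers also drive the pooled effect, not only the heterogeneity
infl_pogo<-dmetar::InfluenceAnalysis(mcont_pogo, random = TRUE)
plot(infl_pogo, "baujat")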
#Meta-regression for POGO

#Controlling for population characteristics (general, obese, neck immobilization, pregnant women)
meta::metareg(mcont_pogo,population)
## 
## Mixed-Effects Model (k = 20; tau^2 estimator: DL)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7030 (SE = 0.3235)
## tau (square root of estimated tau^2 value):             0.8384
## I^2 (residual heterogeneity / unaccounted variability): 93.28%
## H^2 (unaccounted variability / sampling variability):   14.89
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 15) = 223.3704, p-val < .0001
## 
## Test of Moderators (coefficients 2:5):
## QM(df = 4) = 8.7555, p-val = 0.0675
## 
## Model Results:
## 
##                                estimate      se     zval    pval    ci.lb 
## intrcpt                          1.3339  0.6543   2.0386  0.0415   0.0515 
## populationGeneral               -0.0495  0.7039  -0.0703  0.9439  -1.4291 
## populationNeck Immobilization    2.3040  1.1639   1.9795  0.0478   0.0228 
## populationObese                 -0.7396  0.7870  -0.9397  0.3474  -2.2821 
## populationPregnant Women        -0.5965  1.0884  -0.5480  0.5837  -2.7298 
##                                 ci.ub 
## intrcpt                        2.6163  * 
## populationGeneral              1.3300    
## populationNeck Immobilization  4.5853  * 
## populationObese                0.8030    
## populationPregnant Women       1.5368    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for operator experience
meta::metareg(mcont_pogo,experience)
## 
## Mixed-Effects Model (k = 20; tau^2 estimator: DL)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7590 (SE = 0.3375)
## tau (square root of estimated tau^2 value):             0.8712
## I^2 (residual heterogeneity / unaccounted variability): 93.43%
## H^2 (unaccounted variability / sampling variability):   15.22
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 17) = 258.6587, p-val < .0001
## 
## Test of Moderators (coefficients 2:3):
## QM(df = 2) = 2.6418, p-val = 0.2669
## 
## Model Results:
## 
##                            estimate      se     zval    pval    ci.lb   ci.ub 
## intrcpt                      2.0614  0.5953   3.4629  0.0005   0.8947  3.2282 
## experienceExperienced       -0.8825  0.6437  -1.3710  0.1704  -2.1441  0.3791 
## experienceNon-experienced   -1.2516  0.7918  -1.5807  0.1139  -2.8035  0.3003 
##  
## intrcpt                    *** 
## experienceExperienced 
## experienceNon-experienced 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for intubation technique applied (regular, rapid sequence induction)
meta::metareg(mcont_pogo,technique)
## 
## Mixed-Effects Model (k = 20; tau^2 estimator: DL)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7120 (SE = 0.3158)
## tau (square root of estimated tau^2 value):             0.8438
## I^2 (residual heterogeneity / unaccounted variability): 92.65%
## H^2 (unaccounted variability / sampling variability):   13.60
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 17) = 231.1976, p-val < .0001
## 
## Test of Moderators (coefficients 2:3):
## QM(df = 2) = 5.7932, p-val = 0.0552
## 
## Model Results:
## 
##                                            estimate      se     zval    pval 
## intrcpt                                      2.1314  0.4333   4.9189  <.0001 
## techniqueRegular                            -1.1433  0.4937  -2.3160  0.0206 
## techniqueRegular/Rapid Sequence Induction   -1.4624  0.9657  -1.5143  0.1299 
##                                              ci.lb    ci.ub 
## intrcpt                                     1.2821   2.9806  *** 
## techniqueRegular                           -2.1109  -0.1758    * 
## techniqueRegular/Rapid Sequence Induction  -3.3551   0.4303      
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for setting (operating room, out of hospital, etc.)
meta::metareg(mcont_pogo,set)
## 
## Mixed-Effects Model (k = 20; tau^2 estimator: DL)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7040 (SE = 0.3050)
## tau (square root of estimated tau^2 value):             0.8390
## I^2 (residual heterogeneity / unaccounted variability): 92.61%
## H^2 (unaccounted variability / sampling variability):   13.53
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 18) = 243.4598, p-val < .0001
## 
## Test of Moderators (coefficient 2):
## QM(df = 1) = 0.1069, p-val = 0.7437
## 
## Model Results:
## 
##                    estimate      se     zval    pval    ci.lb   ci.ub 
## intrcpt              1.4882  0.8529   1.7448  0.0810  -0.1835  3.1599  . 
## setOperating Room   -0.2870  0.8776  -0.3270  0.7437  -2.0070  1.4331    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for nature of procedure (elective, urgent)
meta::metareg(mcont_pogo,nature)
## 
## Mixed-Effects Model (k = 20; tau^2 estimator: DL)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.6580 (SE = 0.2813)
## tau (square root of estimated tau^2 value):             0.8112
## I^2 (residual heterogeneity / unaccounted variability): 92.83%
## H^2 (unaccounted variability / sampling variability):   13.95
## R^2 (amount of heterogeneity accounted for):            3.15%
## 
## Test for Residual Heterogeneity:
## QE(df = 18) = 251.0281, p-val < .0001
## 
## Test of Moderators (coefficient 2):
## QM(df = 1) = 9.6068, p-val = 0.0019
## 
## Model Results:
## 
##                 estimate      se     zval    pval    ci.lb    ci.ub 
## intrcpt           2.9302  0.5890   4.9750  <.0001   1.7758   4.0846  *** 
## natureElective   -1.9345  0.6241  -3.0995  0.0019  -3.1577  -0.7112   ** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
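#A sketch, not in the original analysis: since nature of procedure reduces to a single binary moderator here, a bubble plot can visualize the meta-regression; the metareg object is stored first so the same fit feeds the plot
mreg_nature<-meta::metareg(mcont_pogo,nature)
meta::bubble(mreg_nature,studlab = TRUE)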
#Controlling for inducer used
meta::metareg(mcont_pogo,inducer)
## 
## Mixed-Effects Model (k = 20; tau^2 estimator: DL)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.6885 (SE = 0.3092)
## tau (square root of estimated tau^2 value):             0.8297
## I^2 (residual heterogeneity / unaccounted variability): 92.44%
## H^2 (unaccounted variability / sampling variability):   13.22
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 17) = 224.7526, p-val < .0001
## 
## Test of Moderators (coefficients 2:3):
## QM(df = 2) = 16.4061, p-val = 0.0003
## 
## Model Results:
## 
##                    estimate      se     zval    pval    ci.lb    ci.ub 
## intrcpt              3.9542  0.7419   5.3298  <.0001   2.5001   5.4083  *** 
## inducerPropofol     -2.8554  0.7736  -3.6909  0.0002  -4.3717  -1.3391  *** 
## inducerThiopental   -3.7029  0.9607  -3.8545  0.0001  -5.5857  -1.8200  *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for opioid used
meta::metareg(mcont_pogo,opioid)
## 
## Mixed-Effects Model (k = 20; tau^2 estimator: DL)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7819 (SE = 0.3538)
## tau (square root of estimated tau^2 value):             0.8843
## I^2 (residual heterogeneity / unaccounted variability): 93.41%
## H^2 (unaccounted variability / sampling variability):   15.18
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 16) = 242.9356, p-val < .0001
## 
## Test of Moderators (coefficients 2:4):
## QM(df = 3) = 5.1584, p-val = 0.1606
## 
## Model Results:
## 
##                     estimate      se     zval    pval    ci.lb   ci.ub 
## intrcpt               1.6458  0.3750   4.3890  <.0001   0.9108  2.3807  *** 
## opioidFentanyl       -0.8029  0.4728  -1.6982  0.0895  -1.7296  0.1238    . 
## opioidRemifentanil    0.5595  0.7795   0.7178  0.4729  -0.9683  2.0873      
## opioidSulfentanil    -0.6774  0.9714  -0.6974  0.4856  -2.5814  1.2265      
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for neuromuscular blocking agent used
meta::metareg(mcont_pogo,blocker)
## 
## Mixed-Effects Model (k = 20; tau^2 estimator: DL)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7136 (SE = 0.3429)
## tau (square root of estimated tau^2 value):             0.8447
## I^2 (residual heterogeneity / unaccounted variability): 92.29%
## H^2 (unaccounted variability / sampling variability):   12.97
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 14) = 181.6135, p-val < .0001
## 
## Test of Moderators (coefficients 2:6):
## QM(df = 5) = 17.6985, p-val = 0.0033
## 
## Model Results:
## 
##                         estimate      se     zval    pval    ci.lb    ci.ub 
## intrcpt                   4.0009  0.7528   5.3150  <.0001   2.5255   5.4763 
## blockerAtracurium        -3.6830  0.9781  -3.7656  0.0002  -5.6000  -1.7660 
## blockerCisatracurium     -2.3752  0.9091  -2.6128  0.0090  -4.1570  -0.5935 
## blockerRocuronium        -3.1003  0.8150  -3.8042  0.0001  -4.6976  -1.5030 
## blockerSuccinylcholine   -2.8509  0.8796  -3.2411  0.0012  -4.5748  -1.1269 
## blockerVecuronium        -3.2412  1.1740  -2.7608  0.0058  -5.5422  -0.9401 
##  
## intrcpt                 *** 
## blockerAtracurium       *** 
## blockerCisatracurium     ** 
## blockerRocuronium       *** 
## blockerSuccinylcholine   ** 
## blockerVecuronium        ** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Multiple meta-regression for POGO
str(pogo)
## 'data.frame':    20 obs. of  20 variables:
##  $ id          : int  2 12 15 24 25 26 62 65 66 88 ...
##  $ author      : Factor w/ 17 levels "Abdallah","Akbarzadeh",..: 1 2 3 4 4 5 6 7 8 10 ...
##  $ year        : int  2019 2017 2012 2016 2017 2014 2011 2020 2008 2008 ...
##  $ population  : Factor w/ 5 levels "","General","Neck Immobilization",..: 2 4 2 2 2 5 2 2 4 2 ...
##  $ predicted   : Factor w/ 3 levels "","Difficult",..: 3 2 2 3 3 3 3 1 1 3 ...
##  $ set         : Factor w/ 2 levels "ICU","Operating Room": 2 2 2 2 2 2 2 1 2 2 ...
##  $ nature      : Factor w/ 2 levels "","Elective": 2 2 1 2 2 2 2 2 2 2 ...
##  $ technique   : Factor w/ 3 levels "","Regular","Regular/Rapid Sequence Induction": 2 2 1 2 2 2 2 1 2 2 ...
##  $ experience  : Factor w/ 3 levels "","Experienced",..: 2 1 1 3 2 2 1 2 2 2 ...
##  $ intervention: Factor w/ 8 levels "Airtraq","C-MAC D",..: 1 4 1 4 4 6 4 3 1 7 ...
##  $ reference   : Factor w/ 1 level "Macintosh": 1 1 1 1 1 1 1 1 1 1 ...
##  $ inducer     : Factor w/ 3 levels "","Propofol",..: 2 3 1 2 2 3 2 1 2 2 ...
##  $ opioid      : Factor w/ 4 levels "","Fentanyl",..: 2 2 1 2 2 1 1 2 4 1 ...
##  $ blocker     : Factor w/ 6 levels "","Atracurium",..: 3 2 1 3 3 4 4 1 5 6 ...
##  $ mpogo.1     : num  97 62 97 88.2 77.1 ...
##  $ sdpogo.1    : num  2.41 28 3 22.06 28.9 ...
##  $ fpogo.1     : int  35 34 25 40 70 40 30 108 106 22 ...
##  $ mpogo.2     : num  90.6 69 65 57.2 43.4 ...
##  $ sdpogo.2    : num  2.49 30 2 29.26 28.2 ...
##  $ fpogo.2     : int  35 34 25 40 70 40 30 110 106 22 ...
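#Note: str() shows "" as the first level of several factors, so unreported values silently act as the reference category in the models below; a minimal sketch (an assumption, not the original workflow) for recoding them to NA instead:
pogo_clean<-pogo
for (v in c("population","predicted","nature","technique","experience","inducer","opioid","blocker")) {
  x<-as.character(pogo_clean[[v]])
  x[x==""]<-NA
  pogo_clean[[v]]<-factor(x)  # drops the empty-string level entirely
}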
model_pogo<-metafor::rma.uni(m1i = mpogo.1,sd1i = sdpogo.1,n1i = fpogo.1,m2i = mpogo.2,sd2i = sdpogo.2,n2i = fpogo.2,data = pogo,method = "ML", measure = "SMD", mods = ~experience+technique+population+set+nature+inducer+opioid+blocker,test = "knha")
## Warning in metafor::rma.uni(m1i = mpogo.1, sd1i = sdpogo.1, n1i = fpogo.1, :
## Redundant predictors dropped from the model.
model_pogo
## 
## Mixed-Effects Model (k = 20; tau^2 estimator: ML)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.0540 (SE = 0.0351)
## tau (square root of estimated tau^2 value):             0.2324
## I^2 (residual heterogeneity / unaccounted variability): 51.16%
## H^2 (unaccounted variability / sampling variability):   2.05
## R^2 (amount of heterogeneity accounted for):            98.63%
## 
## Test for Residual Heterogeneity:
## QE(df = 4) = 38.1516, p-val < .0001
## 
## Test of Moderators (coefficients 2:16):
## F(df1 = 15, df2 = 4) = 2.3514, p-val = 0.2119
## 
## Model Results:
## 
##                                            estimate      se     tval    pval 
## intrcpt                                      2.4248  1.6212   1.4957  0.2091 
## experienceExperienced                       -0.2065  1.0999  -0.1878  0.8602 
## experienceNon-experienced                   -0.3416  1.2333  -0.2770  0.7955 
## techniqueRegular                            -0.3384  1.2590  -0.2688  0.8014 
## techniqueRegular/Rapid Sequence Induction   -0.1732  2.2699  -0.0763  0.9428 
## populationGeneral                           12.3811  3.1898   3.8814  0.0178 
## populationNeck Immobilization               15.2592  3.6455   4.1858  0.0139 
## populationObese                             12.6192  3.8378   3.2881  0.0303 
## populationPregnant Women                    13.9264  5.1692   2.6941  0.0544 
## setOperating Room                           -2.4518  2.1777  -1.1258  0.3232 
## natureElective                             -12.6172  3.2654  -3.8639  0.0181 
## inducerPropofol                              1.5677  2.2530   0.6958  0.5248 
## opioidFentanyl                              -0.4940  0.9004  -0.5487  0.6124 
## opioidSulfentanil                           -0.0295  1.0899  -0.0270  0.9797 
## blockerAtracurium                            0.6189  0.8401   0.7367  0.5022 
## blockerCisatracurium                         1.2920  0.5325   2.4264  0.0723 
##                                               ci.lb    ci.ub 
## intrcpt                                     -2.0763   6.9259    
## experienceExperienced                       -3.2602   2.8472    
## experienceNon-experienced                   -3.7659   3.0826    
## techniqueRegular                            -3.8340   3.1572    
## techniqueRegular/Rapid Sequence Induction   -6.4755   6.1291    
## populationGeneral                            3.5247  21.2374  * 
## populationNeck Immobilization                5.1377  25.3808  * 
## populationObese                              1.9637  23.2747  * 
## populationPregnant Women                    -0.4255  28.2784  . 
## setOperating Room                           -8.4981   3.5946    
## natureElective                             -21.6833  -3.5510  * 
## inducerPropofol                             -4.6876   7.8230    
## opioidFentanyl                              -2.9938   2.0058    
## opioidSulfentanil                           -3.0555   2.9965    
## blockerAtracurium                           -1.7135   2.9513    
## blockerCisatracurium                        -0.1864   2.7704  . 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
model_pogo.2<-metafor::rma.uni(m1i = mpogo.1,sd1i = sdpogo.1,n1i = fpogo.1,m2i = mpogo.2,sd2i = sdpogo.2,n2i = fpogo.2,data = pogo,method = "ML", measure = "SMD", mods = ~population+nature+blocker,test = "knha")
model_pogo.2
## 
## Mixed-Effects Model (k = 20; tau^2 estimator: ML)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.1181 (SE = 0.0580)
## tau (square root of estimated tau^2 value):             0.3436
## I^2 (residual heterogeneity / unaccounted variability): 68.87%
## H^2 (unaccounted variability / sampling variability):   3.21
## R^2 (amount of heterogeneity accounted for):            96.99%
## 
## Test for Residual Heterogeneity:
## QE(df = 9) = 65.3844, p-val < .0001
## 
## Test of Moderators (coefficients 2:11):
## F(df1 = 10, df2 = 9) = 6.0316, p-val = 0.0062
## 
## Model Results:
## 
##                                estimate      se     tval    pval     ci.lb 
## intrcpt                          1.5949  0.9816   1.6248  0.1386   -0.6256 
## populationGeneral               10.7593  2.0869   5.1557  0.0006    6.0384 
## populationNeck Immobilization   13.8369  2.2314   6.2009  0.0002    8.7890 
## populationObese                 10.5103  2.0282   5.1820  0.0006    5.9221 
## populationPregnant Women        10.9364  2.1603   5.0625  0.0007    6.0496 
## natureElective                 -10.8660  1.9157  -5.6720  0.0003  -15.1996 
## blockerAtracurium               -1.0572  0.7241  -1.4600  0.1783   -2.6952 
## blockerCisatracurium             0.0689  0.6292   0.1095  0.9152   -1.3545 
## blockerRocuronium               -0.9280  0.5827  -1.5925  0.1457   -2.2461 
## blockerSuccinylcholine          -0.2607  0.8360  -0.3119  0.7622   -2.1519 
## blockerVecuronium               -0.7285  0.8381  -0.8692  0.4073   -2.6243 
##                                  ci.ub 
## intrcpt                         3.8153      
## populationGeneral              15.4801  *** 
## populationNeck Immobilization  18.8848  *** 
## populationObese                15.0985  *** 
## populationPregnant Women       15.8233  *** 
## natureElective                 -6.5323  *** 
## blockerAtracurium               0.5809      
## blockerCisatracurium            1.4923      
## blockerRocuronium               0.3902      
## blockerSuccinylcholine          1.6305      
## blockerVecuronium               1.1673      
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
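#A sketch, not in the original analysis: both moderator models were fitted with method = "ML", so their fit can be compared directly via log-likelihood and information criteria
metafor::fitstats(model_pogo, model_pogo.2)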
metafor::permutest(model_pogo)
## Running 1000 iterations for approximate permutation test.
## 
(progress bar output omitted; the permutation test aborted with the following error)
## Error in rma.uni(x$yi, x$vi, weights = x$weights, mods = cbind(X[sample(x$k),  :
##   Fisher scoring algorithm did not converge. See 'help(rma)' for possible remedies.
  |                                                                            
  |==================================                                    |  49%
  |                                                                            
  |===================================                                   |  49%
  |                                                                            
  |===================================                                   |  50%
  |                                                                            
  |===================================                                   |  51%
  |                                                                            
  |====================================                                  |  51%
  |                                                                            
  |====================================                                  |  52%
  |                                                                            
  |=====================================                                 |  52%
  |                                                                            
  |=====================================                                 |  53%
  |                                                                            
  |=====================================                                 |  54%
  |                                                                            
  |======================================                                |  54%
  |                                                                            
  |======================================                                |  55%
  |                                                                            
  |=======================================                               |  55%
  |                                                                            
  |=======================================                               |  56%
  |                                                                            
  |========================================                              |  56%
  |                                                                            
  |========================================                              |  57%
  |                                                                            
  |========================================                              |  58%
  |                                                                            
  |=========================================                             |  58%
  |                                                                            
  |=========================================                             |  59%
  |                                                                            
  |==========================================                            |  59%
  |                                                                            
  |==========================================                            |  60%
  |                                                                            
  |==========================================                            |  61%
  |                                                                            
  |===========================================                           |  61%
  |                                                                            
  |===========================================                           |  62%
  |                                                                            
  |============================================                          |  62%
  |                                                                            
  |============================================                          |  63%
  |                                                                            
  |============================================                          |  64%
  |                                                                            
  |=============================================                         |  64%
  |                                                                            
  |=============================================                         |  65%
  |                                                                            
  |==============================================                        |  65%
  |                                                                            
  |==============================================                        |  66%
  |                                                                            
  |===============================================                       |  66%
  |                                                                            
  |===============================================                       |  67%
  |                                                                            
  |===============================================                       |  68%
  |                                                                            
  |================================================                      |  68%
  |                                                                            
  |================================================                      |  69%
  |                                                                            
  |=================================================                     |  69%
  |                                                                            
  |=================================================                     |  70%
  |                                                                            
  |=================================================                     |  71%
  |                                                                            
  |==================================================                    |  71%
  |                                                                            
  |==================================================                    |  72%
  |                                                                            
  |===================================================                   |  72%
  |                                                                            
  |===================================================                   |  73%
  |                                                                            
  |===================================================                   |  74%
  |                                                                            
  |====================================================                  |  74%
  |                                                                            
  |====================================================                  |  75%
  |                                                                            
  |=====================================================                 |  75%
  |                                                                            
  |=====================================================                 |  76%
  |                                                                            
  |======================================================                |  76%
  |                                                                            
  |======================================================                |  77%
  |                                                                            
  |======================================================                |  78%
  |                                                                            
  |=======================================================               |  78%
  |                                                                            
  |=======================================================               |  79%
  |                                                                            
  |========================================================              |  79%
  |                                                                            
  |========================================================              |  80%
  |                                                                            
  |========================================================              |  81%
  |                                                                            
  |=========================================================             |  81%
  |                                                                            
  |=========================================================             |  82%
  |                                                                            
  |==========================================================            |  82%
  |                                                                            
  |==========================================================            |  83%
  |                                                                            
  |==========================================================            |  84%
  |                                                                            
  |===========================================================           |  84%
  |                                                                            
  |===========================================================           |  85%
  |                                                                            
  |============================================================          |  85%
  |                                                                            
  |============================================================          |  86%
  |                                                                            
  |=============================================================         |  86%
  |                                                                            
  |=============================================================         |  87%
  |                                                                            
  |=============================================================         |  88%
  |                                                                            
  |==============================================================        |  88%
  |                                                                            
  |==============================================================        |  89%
  |                                                                            
  |===============================================================       |  89%
  |                                                                            
  |===============================================================       |  90%
  |                                                                            
  |===============================================================       |  91%
  |                                                                            
  |================================================================      |  91%
  |                                                                            
  |================================================================      |  92%
  |                                                                            
  |=================================================================     |  92%
  |                                                                            
  |=================================================================     |  93%
  |                                                                            
  |=================================================================     |  94%
  |                                                                            
  |==================================================================    |  94%
  |                                                                            
  |==================================================================    |  95%
  |                                                                            
  |===================================================================   |  95%
  |                                                                            
  |===================================================================   |  96%
  |                                                                            
  |====================================================================  |  96%
  |                                                                            
  |====================================================================  |  97%
  |                                                                            
  |====================================================================  |  98%
  |                                                                            
  |===================================================================== |  98%
  |                                                                            
  |===================================================================== |  99%
  |                                                                            
  |======================================================================|  99%
  |                                                                            
  |======================================================================| 100%
## 
## Test of Moderators (coefficients 2:16):
## F(df1 = 15, df2 = 4) = 2.3514, p-val* = 0.2850
## 
## Model Results:
## 
##                                            estimate      se     tval   pval* 
## intrcpt                                      2.4248  1.6212   1.4957  0.3140 
## experienceExperienced                       -0.2065  1.0999  -0.1878  0.6620 
## experienceNon-experienced                   -0.3416  1.2333  -0.2770  0.7420 
## techniqueRegular                            -0.3384  1.2590  -0.2688  0.7570 
## techniqueRegular/Rapid Sequence Induction   -0.1732  2.2699  -0.0763  0.9430 
## populationGeneral                           12.3811  3.1898   3.8814  0.0890 
## populationNeck Immobilization               15.2592  3.6455   4.1858  0.0640 
## populationObese                             12.6192  3.8378   3.2881  0.1410 
## populationPregnant Women                    13.9264  5.1692   2.6941  0.2250 
## setOperating Room                           -2.4518  2.1777  -1.1258  0.4740 
## natureElective                             -12.6172  3.2654  -3.8639  0.0960 
## inducerPropofol                              1.5677  2.2530   0.6958  0.4690 
## opioidFentanyl                              -0.4940  0.9004  -0.5487  0.4780 
## opioidSulfentanil                           -0.0295  1.0899  -0.0270  0.9290 
## blockerAtracurium                            0.6189  0.8401   0.7367  0.3890 
## blockerCisatracurium                         1.2920  0.5325   2.4264  0.0500 
##                                               ci.lb    ci.ub 
## intrcpt                                     -2.0763   6.9259    
## experienceExperienced                       -3.2602   2.8472    
## experienceNon-experienced                   -3.7659   3.0826    
## techniqueRegular                            -3.8340   3.1572    
## techniqueRegular/Rapid Sequence Induction   -6.4755   6.1291    
## populationGeneral                            3.5247  21.2374  . 
## populationNeck Immobilization                5.1377  25.3808  . 
## populationObese                              1.9637  23.2747    
## populationPregnant Women                    -0.4255  28.2784    
## setOperating Room                           -8.4981   3.5946    
## natureElective                             -21.6833  -3.5510  . 
## inducerPropofol                             -4.6876   7.8230    
## opioidFentanyl                              -2.9938   2.0058    
## opioidSulfentanil                           -3.0555   2.9965    
## blockerAtracurium                           -1.7135   2.9513    
## blockerCisatracurium                        -0.1864   2.7704  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Funnel Plot for POGO
meta::funnel(mcont_pogo,ref.triangle=TRUE,contour.levels=c(0.9,0.95,0.99),col.contour=c("darkblue","blue","lightblue"))
meta::metabias(mcont_pogo, method.bias = "linreg")
## 
##  Linear regression test of funnel plot asymmetry
## 
## data:  mcont_pogo
## t = 3.138, df = 18, p-value = 0.005686
## alternative hypothesis: asymmetry in funnel plot
## sample estimates:
##       bias    se.bias  intercept 
##  5.7903355  1.8452203 -0.3896163
dmetar::eggers.test(mcont_pogo)
##              Intercept ConfidenceInterval     t       p
## Egger's test      5.79        2.262-9.318 3.138 0.00569

trimfill_pogo<-meta::trimfill(mcont_pogo)
trimfill_pogo
##                            SMD              95%-CI %W(random)
## Abdallah 2019           2.5911 [  1.9471;  3.2350]        3.8
## Akbarzadeh 2017        -0.2385 [ -0.7156;  0.2387]        4.0
## Ali 2012               12.3543 [  9.7713; 14.9373]        1.6
## Aqil 2016               1.1848 [  0.7079;  1.6618]        4.0
## Aqil 2017               1.1739 [  0.8144;  1.5333]        4.1
## Arici 2014              0.7374 [  0.2837;  1.1910]        4.0
## Choi 2011               0.9662 [  0.4295;  1.5030]        3.9
## Dey 2020                1.4882 [  1.1876;  1.7888]        4.1
## Dhonneur 2008           0.9683 [  0.6835;  1.2532]        4.1
## Khan 2008               0.7597 [  0.1458;  1.3736]        3.8
## Kido 2015               0.9630 [  0.3748;  1.5512]        3.9
## Koh 2010                3.6379 [  2.7110;  4.5649]        3.5
## Jafra 2018             -0.3196 [ -0.5986; -0.0406]        4.1
## Ruetzeler 2020          0.6690 [  0.3140;  1.0241]        4.1
## Sargin 2016             0.8283 [  0.4192;  1.2374]        4.0
## Shah 2016               0.8846 [  0.3527;  1.4165]        3.9
## Tsan 2020               0.3062 [ -0.0295;  0.6419]        4.1
## Yumul 2016              0.9978 [  0.2460;  1.7496]        3.7
## Yumul 2016              1.2070 [  0.4390;  1.9749]        3.7
## Yumul 2016              1.4607 [  0.6940;  2.2275]        3.7
## Filled: Aqil 2016      -0.0376 [ -0.5146;  0.4394]        4.0
## Filled: Yumul 2016     -0.0597 [ -0.8277;  0.7082]        3.7
## Filled: Yumul 2016     -0.3135 [ -1.0802;  0.4532]        3.7
## Filled: Dey 2020       -0.3409 [ -0.6415; -0.0403]        4.1
## Filled: Abdallah 2019  -1.4438 [ -2.0878; -0.7998]        3.8
## Filled: Koh 2010       -2.4907 [ -3.4176; -1.5638]        3.5
## Filled: Ali 2012      -11.2071 [-13.7901; -8.6240]        1.6
## 
## Number of studies combined: k = 27 (with 7 added studies)
## 
##                         SMD            95%-CI    z p-value
## Random effects model 0.6203 [ 0.2087; 1.0319] 2.95  0.0031
## Prediction interval         [-1.5367; 2.7773]             
## 
## Quantifying heterogeneity:
##  tau^2 = 1.0528 [1.7739; 6.0484]; tau = 1.0260 [1.3319; 2.4593];
##  I^2 = 94.8% [93.4%; 95.9%]; H = 4.38 [3.89; 4.93]
## 
## Test of heterogeneity:
##       Q d.f.  p-value
##  498.35   26 < 0.0001
## 
## Details on meta-analytical method:
## - Inverse variance method
## - DerSimonian-Laird estimator for tau^2
## - Jackson method for confidence interval of tau^2 and tau
## - Trim-and-fill method to adjust for funnel plot asymmetry
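
A contour-enhanced funnel of the trim-and-fill-adjusted model helps visualize where the imputed studies fall relative to the observed ones. A minimal sketch, reusing the contour settings from above (meta::funnel also accepts trimfill objects):

#Funnel plot of the trim-and-fill-adjusted POGO model; filled (imputed) studies are plotted alongside the observed ones
meta::funnel(trimfill_pogo,ref.triangle=TRUE,contour.levels=c(0.9,0.95,0.99),col.contour=c("darkblue","blue","lightblue"))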

Difficult Intubation - Different Scales

difficult<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Pairwise VLs Difficult Intubation.csv")
length(difficult$fdif.e1)
## [1] 37
#Number of comparisons with zero events in both arms
difficult_zeros<-dplyr::filter(difficult,difficult$fdif.e1==0 & difficult$fdif.e2==0)
length(difficult_zeros$fdif.e1)
## [1] 1
#Table for Meta-analysis of Difficult intubation
difficult_analysis<-dplyr::filter(difficult,difficult$fdif.e1>0 | difficult$fdif.e2>0)

#Number of comparisons and patients meta-analyzed for Difficult Intubation
length(difficult_analysis$fdif.e1)
## [1] 36
sum(difficult_analysis$fdif.t1,difficult_analysis$fdif.t2)
## [1] 3037
#Meta-analysis for Difficult Intubation
mbin_difficult_random<-meta::metabin(fdif.e1,fdif.t1,fdif.e2,fdif.t2,data = difficult_analysis,studlab = paste(author, year),comb.fixed = FALSE,comb.random = TRUE,method.tau = "SJ",hakn = TRUE,prediction = TRUE,incr = 0.5,sm = "RR")
mbin_difficult_random
##                            RR           95%-CI %W(random)
## Agrawal 2020           0.3827 [0.2588; 0.5659]        3.9
## Ahmad 2016             1.0000 [0.9774; 1.0231]        4.3
## Ali 2017               0.7083 [0.4940; 1.0157]        4.0
## Aoi 2010               0.1875 [0.0667; 0.5272]        2.5
## Arima 2014             0.6235 [0.3964; 0.9807]        3.8
## Barak 2007             0.8036 [0.2655; 2.4320]        2.4
## Wasem 2013             0.5000 [0.2530; 0.9882]        3.3
## Yao 2015               1.2500 [0.3573; 4.3736]        2.1
## Bhalla 2018            1.7143 [0.9434; 3.1151]        3.5
## Bhandari 2013          0.0435 [0.0062; 0.3067]        1.2
## Bharti 2014            0.6107 [0.3509; 1.0628]        3.6
## Chandrashekaraiah 2017 0.9565 [0.7140; 1.2815]        4.1
## Di Marco 2011          0.6500 [0.3612; 1.1698]        3.5
## Hamp 2015              0.2941 [0.0362; 2.3868]        1.1
## Hosalli 2017           0.3333 [0.1794; 0.6195]        3.4
## Kaur 2020              0.3214 [0.1524; 0.6780]        3.1
## Kim 2013               0.1556 [0.0594; 0.4074]        2.7
## Kumar_2 2019           0.1786 [0.0798; 0.3997]        3.0
## Kunaz 2016             1.3333 [0.3144; 5.6542]        1.8
## Lascarrou 2017         0.9946 [0.4879; 2.0277]        3.2
## Maharaj 2006           0.3125 [0.1313; 0.7439]        2.9
## Maharaj 2007           0.0714 [0.0103; 0.4930]        1.2
## Maharaj 2008           0.2683 [0.1313; 0.5484]        3.2
## Malik 2008             0.5513 [0.4261; 0.7133]        4.1
## Malik_1 2009           0.4167 [0.2940; 0.5906]        4.0
## Malik_2 2009           0.1818 [0.0712; 0.4643]        2.7
## Mcelwain 2011          0.7055 [0.5318; 0.9360]        4.1
## Kulkarni 2013          0.6667 [0.1198; 3.7087]        1.5
## Ndoko 2008             0.0909 [0.0052; 1.6036]        0.7
## Pappu 2020             0.2000 [0.0100; 3.9955]        0.6
## Pazur 2016             0.8889 [0.4067; 1.9430]        3.1
## Reena 2019             0.5000 [0.0959; 2.6074]        1.5
## Ruetzeler 2020         0.4522 [0.2214; 0.9234]        3.2
## Tolon 2012             0.2857 [0.1136; 0.7186]        2.8
## Vijayakumar 2016       0.7000 [0.2924; 1.6759]        2.9
## Yoo 2018               0.3333 [0.0375; 2.9625]        1.0
## 
## Number of studies combined: k = 36
## 
##                          RR           95%-CI     t  p-value
## Random effects model 0.4822 [0.3793; 0.6131] -6.17 < 0.0001
## Prediction interval         [0.1308; 1.7778]               
## 
## Quantifying heterogeneity:
##  tau^2 = 0.3981 [0.1156; 0.6790]; tau = 0.6310 [0.3400; 0.8240];
##  I^2 = 84.1% [78.8%; 88.0%]; H = 2.51 [2.17; 2.89]
## 
## Test of heterogeneity:
##       Q d.f.  p-value
##  219.74   35 < 0.0001
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
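Note that while the random-effects confidence interval excludes 1, the prediction interval does not. A minimal sketch (our addition) extracting it on the RR scale from the meta object:

#Back-transform the 95% prediction interval to the RR scale; it crosses 1, so a new comparable study may show no reduction in difficult intubation
exp(c(mbin_difficult_random$lower.predict,mbin_difficult_random$upper.predict))
#Should reproduce the interval [0.1308; 1.7778] reported above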
#Estimated probability of Difficult Intubation with Macintosh
meta::metaprop(event = fdif.e2,n = fdif.t2 ,studlab = paste(author,year),data = difficult,method = "GLMM",sm = "PLOGIT",comb.fixed = FALSE,comb.random = TRUE,hakn = TRUE)
##                        proportion           95%-CI
## Agrawal 2020               1.0000 [0.9119; 1.0000]
## Ahmad 2016                 1.0000 [0.9623; 1.0000]
## Ali 2017                   0.8000 [0.6143; 0.9229]
## Aoi 2010                   0.9412 [0.7131; 0.9985]
## Arima 2014                 0.6415 [0.4980; 0.7686]
## Barak 2007                 0.0778 [0.0318; 0.1537]
## Wasem 2013                 0.5333 [0.3433; 0.7166]
## Yao 2015                   0.0833 [0.0232; 0.1998]
## Bhalla 2018                0.4667 [0.2127; 0.7341]
## Bhandari 2013              0.5750 [0.4089; 0.7296]
## Bharti 2014                0.7368 [0.4880; 0.9085]
## Chandrashekaraiah 2017     0.7667 [0.5772; 0.9007]
## Di Marco 2011              0.3704 [0.2429; 0.5126]
## Hamp 2015                  0.2000 [0.0573; 0.4366]
## Hosalli 2017               0.8000 [0.6143; 0.9229]
## Kaur 2020                  0.3500 [0.2063; 0.5168]
## Kim 2013                   1.0000 [0.8518; 1.0000]
## Kumar_2 2019               0.9333 [0.7793; 0.9918]
## Kunaz 2016                 0.0600 [0.0125; 0.1655]
## Lascarrou 2017             0.0757 [0.0420; 0.1237]
## Maharaj 2006               0.5333 [0.3433; 0.7166]
## Maharaj 2007               0.7000 [0.4572; 0.8811]
## Maharaj 2008               1.0000 [0.8316; 1.0000]
## Malik 2008                 0.8667 [0.6928; 0.9624]
## Malik_1 2009               0.9600 [0.7965; 0.9990]
## Malik_2 2009               0.7333 [0.5411; 0.8772]
## Mcelwain 2011              0.8065 [0.6253; 0.9255]
## Kulkarni 2013              0.1000 [0.0211; 0.2653]
## Ndoko 2008                 0.0943 [0.0313; 0.2066]
## Pappu 2020                 0.0667 [0.0082; 0.2207]
## Pazur 2016                 0.3462 [0.1721; 0.5567]
## Reena 2019                 0.0800 [0.0222; 0.1923]
## Ruetzeler 2020             0.3016 [0.1923; 0.4302]
## Takenaka 2011              0.0000 [0.0000; 0.1028]
## Tolon 2012                 0.7000 [0.4572; 0.8811]
## Vijayakumar 2016           0.2222 [0.1120; 0.3709]
## Yoo 2018                   0.1364 [0.0291; 0.3491]
## 
## Number of studies combined: k = 37
## 
##                      proportion           95%-CI
## Random effects model     0.5488 [0.3562; 0.7279]
## 
## Quantifying heterogeneity:
##  tau^2 = 5.5045; tau = 2.3462; I^2 = 96.4%; H = 5.25
## 
## Test of heterogeneity:
##       Q d.f.  p-value             Test
##  352.62   36 < 0.0001        Wald-type
##  928.58   36 < 0.0001 Likelihood-Ratio
## 
## Details on meta-analytical method:
## - Random intercept logistic regression model
## - Maximum-likelihood estimator for tau^2
## - Hartung-Knapp adjustment for random effects model
## - Logit transformation
## - Clopper-Pearson confidence interval for individual studies
## - Continuity correction of 0.5 in studies with zero cell frequencies
##   (only used to calculate individual study results)
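Combining the pooled relative risk with this baseline probability under Macintosh gives a rough sense of the absolute effect. A back-of-envelope illustration only (our addition, not part of the original analysis):

#Illustrative absolute risk difference implied by the pooled RR (0.4822) and the pooled Macintosh proportion (0.5488) estimated above
rr<-0.4822
p_mac<-0.5488
p_vl<-rr*p_mac
c(p_vl=p_vl,ARR=p_mac-p_vl,NNT=1/(p_mac-p_vl))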
#Forest plot for Difficult Intubation
meta::forest(mbin_difficult_random,sortvar=TE,lab.e="Videolaryngoscopes",lab.c="Macintosh",col.study="black",col.square="black",col.diamond="blue")


#Detecting Outliers for Difficult Intubation
dmetar::find.outliers(mbin_difficult_random)
## Identified outliers (random-effects model) 
## ------------------------------------------ 
## "Ahmad 2016", "Bhalla 2018", "Bhandari 2013", "Chandrashekaraiah 2017" 
##  
## Results with outliers removed 
## ----------------------------- 
##                            RR           95%-CI %W(random) exclude
## Agrawal 2020           0.3827 [0.2588; 0.5659]        4.9        
## Ahmad 2016             1.0000 [0.9774; 1.0231]        0.0       *
## Ali 2017               0.7083 [0.4940; 1.0157]        5.0        
## Aoi 2010               0.1875 [0.0667; 0.5272]        2.7        
## Arima 2014             0.6235 [0.3964; 0.9807]        4.7        
## Barak 2007             0.8036 [0.2655; 2.4320]        2.5        
## Wasem 2013             0.5000 [0.2530; 0.9882]        3.8        
## Yao 2015               1.2500 [0.3573; 4.3736]        2.2        
## Bhalla 2018            1.7143 [0.9434; 3.1151]        0.0       *
## Bhandari 2013          0.0435 [0.0062; 0.3067]        0.0       *
## Bharti 2014            0.6107 [0.3509; 1.0628]        4.3        
## Chandrashekaraiah 2017 0.9565 [0.7140; 1.2815]        0.0       *
## Di Marco 2011          0.6500 [0.3612; 1.1698]        4.2        
## Hamp 2015              0.2941 [0.0362; 2.3868]        1.0        
## Hosalli 2017           0.3333 [0.1794; 0.6195]        4.1        
## Kaur 2020              0.3214 [0.1524; 0.6780]        3.6        
## Kim 2013               0.1556 [0.0594; 0.4074]        2.9        
## Kumar_2 2019           0.1786 [0.0798; 0.3997]        3.4        
## Kunaz 2016             1.3333 [0.3144; 5.6542]        1.8        
## Lascarrou 2017         0.9946 [0.4879; 2.0277]        3.7        
## Maharaj 2006           0.3125 [0.1313; 0.7439]        3.2        
## Maharaj 2007           0.0714 [0.0103; 0.4930]        1.2        
## Maharaj 2008           0.2683 [0.1313; 0.5484]        3.7        
## Malik 2008             0.5513 [0.4261; 0.7133]        5.3        
## Malik_1 2009           0.4167 [0.2940; 0.5906]        5.0        
## Malik_2 2009           0.1818 [0.0712; 0.4643]        3.0        
## Mcelwain 2011          0.7055 [0.5318; 0.9360]        5.2        
## Kulkarni 2013          0.6667 [0.1198; 3.7087]        1.4        
## Ndoko 2008             0.0909 [0.0052; 1.6036]        0.6        
## Pappu 2020             0.2000 [0.0100; 3.9955]        0.6        
## Pazur 2016             0.8889 [0.4067; 1.9430]        3.5        
## Reena 2019             0.5000 [0.0959; 2.6074]        1.5        
## Ruetzeler 2020         0.4522 [0.2214; 0.9234]        3.7        
## Tolon 2012             0.2857 [0.1136; 0.7186]        3.0        
## Vijayakumar 2016       0.7000 [0.2924; 1.6759]        3.2        
## Yoo 2018               0.3333 [0.0375; 2.9625]        1.0        
## 
## Number of studies combined: k = 32
## 
##                          RR           95%-CI     t  p-value
## Random effects model 0.4494 [0.3632; 0.5560] -7.66 < 0.0001
## Prediction interval         [0.1560; 1.2945]               
## 
## Quantifying heterogeneity:
##  tau^2 = 0.2575 [0.0263; 0.4328]; tau = 0.5074 [0.1621; 0.6579];
##  I^2 = 48.3% [22.0%; 65.8%]; H = 1.39 [1.13; 1.71]
## 
## Test of heterogeneity:
##      Q d.f. p-value
##  60.00   31  0.0013
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
#Influence Analysis for Difficult Intubation
inf_analysis_difficult<-dmetar::InfluenceAnalysis(mbin_difficult_random,random = TRUE)
plot(inf_analysis_difficult,"baujat")
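
The same InfluenceAnalysis object supports further diagnostic views beyond the Baujat plot. A minimal sketch using the plot types documented in dmetar:

#Leave-one-out diagnostics: standard influence measures, and leave-one-out results sorted by effect size and by I^2
plot(inf_analysis_difficult,"influence")
plot(inf_analysis_difficult,"es")
plot(inf_analysis_difficult,"i2")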

#Meta-regression for Difficult Intubation

#Controlling for population characteristics (general, obese, neck immobilization)
meta::metareg(mbin_difficult_random,population)
## 
## Mixed-Effects Model (k = 36; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.4092 (SE = 0.1218)
## tau (square root of estimated tau^2 value):             0.6397
## I^2 (residual heterogeneity / unaccounted variability): 88.04%
## H^2 (unaccounted variability / sampling variability):   8.36
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 31) = 175.1138, p-val < .0001
## 
## Test of Moderators (coefficients 2:5):
## F(df1 = 4, df2 = 31) = 0.7567, p-val = 0.5613
## 
## Model Results:
## 
##                                estimate      se     tval    pval    ci.lb 
## intrcpt                         -1.6094  1.5081  -1.0672  0.2941  -4.6852 
## populationElderly                1.8971  1.7504   1.0838  0.2868  -1.6728 
## populationGeneral                0.8997  1.5181   0.5927  0.5577  -2.1964 
## populationNeck Immobilization    0.7514  1.5193   0.4945  0.6244  -2.3473 
## populationObese                  1.2949  1.5717   0.8239  0.4163  -1.9106 
##                                 ci.ub 
## intrcpt                        1.4663    
## populationElderly              5.4671    
## populationGeneral              3.9959    
## populationNeck Immobilization  3.8501    
## populationObese                4.5005    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for operator experience
meta::metareg(mbin_difficult_random,experience)
## 
## Mixed-Effects Model (k = 36; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.3964 (SE = 0.1173)
## tau (square root of estimated tau^2 value):             0.6296
## I^2 (residual heterogeneity / unaccounted variability): 88.57%
## H^2 (unaccounted variability / sampling variability):   8.75
## R^2 (amount of heterogeneity accounted for):            0.44%
## 
## Test for Residual Heterogeneity:
## QE(df = 33) = 216.2448, p-val < .0001
## 
## Test of Moderators (coefficients 2:3):
## F(df1 = 2, df2 = 33) = 0.9269, p-val = 0.4058
## 
## Model Results:
## 
##                            estimate      se     tval    pval    ci.lb    ci.ub 
## intrcpt                     -0.8420  0.3364  -2.5030  0.0174  -1.5264  -0.1576 
## experienceExperienced        0.0723  0.3620   0.1997  0.8429  -0.6642   0.8088 
## experienceNon-experienced    0.6172  0.5171   1.1936  0.2411  -0.4348   1.6692 
##  
## intrcpt                    * 
## experienceExperienced 
## experienceNon-experienced 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for intubation technique applied (regular, rapid sequence induction)
meta::metareg(mbin_difficult_random,technique)
## 
## Mixed-Effects Model (k = 36; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.3913 (SE = 0.1174)
## tau (square root of estimated tau^2 value):             0.6256
## I^2 (residual heterogeneity / unaccounted variability): 82.27%
## H^2 (unaccounted variability / sampling variability):   5.64
## R^2 (amount of heterogeneity accounted for):            1.71%
## 
## Test for Residual Heterogeneity:
## QE(df = 32) = 96.2461, p-val < .0001
## 
## Test of Moderators (coefficients 2:4):
## F(df1 = 3, df2 = 32) = 1.2232, p-val = 0.3173
## 
## Model Results:
## 
##                                            estimate      se     tval    pval 
## intrcpt                                     -0.2349  0.3386  -0.6936  0.4929 
## techniqueRapid Sequence Induction           -2.1630  1.4741  -1.4674  0.1520 
## techniqueRegular                            -0.5477  0.3618  -1.5139  0.1399 
## techniqueRegular/Rapid Sequence Induction   -0.5589  0.7349  -0.7605  0.4525 
##                                              ci.lb   ci.ub 
## intrcpt                                    -0.9245  0.4548    
## techniqueRapid Sequence Induction          -5.1656  0.8395    
## techniqueRegular                           -1.2847  0.1892    
## techniqueRegular/Rapid Sequence Induction  -2.0557  0.9380    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for setting (operating room, out of hospital, etc.)
meta::metareg(mbin_difficult_random,set)
## 
## Mixed-Effects Model (k = 36; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.4045 (SE = 0.1191)
## tau (square root of estimated tau^2 value):             0.6360
## I^2 (residual heterogeneity / unaccounted variability): 89.72%
## H^2 (unaccounted variability / sampling variability):   9.73
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 33) = 216.0493, p-val < .0001
## 
## Test of Moderators (coefficients 2:3):
## F(df1 = 2, df2 = 33) = 0.7233, p-val = 0.4927
## 
## Model Results:
## 
##                     estimate      se     tval    pval    ci.lb   ci.ub 
## intrcpt              -0.0054  0.6651  -0.0081  0.9936  -1.3585  1.3478    
## setOperating Room    -0.7604  0.6765  -1.1240  0.2691  -2.1368  0.6160    
## setOut of Hospital   -0.4670  0.9055  -0.5157  0.6095  -2.3091  1.3752    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for nature of procedure (elective, urgent)
meta::metareg(mbin_difficult_random,nature)
## 
## Mixed-Effects Model (k = 36; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.4029 (SE = 0.1174)
## tau (square root of estimated tau^2 value):             0.6348
## I^2 (residual heterogeneity / unaccounted variability): 89.81%
## H^2 (unaccounted variability / sampling variability):   9.82
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 33) = 216.1537, p-val < .0001
## 
## Test of Moderators (coefficients 2:3):
## F(df1 = 2, df2 = 33) = 0.7498, p-val = 0.4804
## 
## Model Results:
## 
##                 estimate      se     tval    pval    ci.lb   ci.ub 
## intrcpt          -1.6094  1.5028  -1.0710  0.2920  -4.6669  1.4480    
## natureElective    0.8496  1.5079   0.5634  0.5769  -2.2182  3.9175    
## natureUrgent      1.3520  1.5689   0.8618  0.3950  -1.8400  4.5440    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for inducer used
meta::metareg(mbin_difficult_random,inducer)
## 
## Mixed-Effects Model (k = 36; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.4123 (SE = 0.1196)
## tau (square root of estimated tau^2 value):             0.6421
## I^2 (residual heterogeneity / unaccounted variability): 83.03%
## H^2 (unaccounted variability / sampling variability):   5.89
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 33) = 107.2379, p-val < .0001
## 
## Test of Moderators (coefficients 2:3):
## F(df1 = 2, df2 = 33) = 0.4005, p-val = 0.6732
## 
## Model Results:
## 
##                             estimate      se     tval    pval    ci.lb   ci.ub 
## intrcpt                      -0.4905  0.3197  -1.5345  0.1344  -1.1409  0.1599 
## inducerPropofol              -0.2873  0.3457  -0.8313  0.4118  -0.9906  0.4159 
## inducerPropofol/Thiopental    0.0851  1.0400   0.0818  0.9353  -2.0308  2.2010 
##  
## intrcpt 
## inducerPropofol 
## inducerPropofol/Thiopental 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for opioid used
meta::metareg(mbin_difficult_random,opioid)
## 
## Mixed-Effects Model (k = 36; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.4233 (SE = 0.1229)
## tau (square root of estimated tau^2 value):             0.6506
## I^2 (residual heterogeneity / unaccounted variability): 83.68%
## H^2 (unaccounted variability / sampling variability):   6.13
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 32) = 109.2604, p-val < .0001
## 
## Test of Moderators (coefficients 2:4):
## F(df1 = 3, df2 = 32) = 0.3384, p-val = 0.7977
## 
## Model Results:
## 
##                     estimate      se     tval    pval    ci.lb    ci.ub 
## intrcpt              -0.7437  0.3047  -2.4404  0.0204  -1.3644  -0.1230  * 
## opioidFentanyl       -0.0181  0.3352  -0.0540  0.9573  -0.7008   0.6646    
## opioidRemifentanil   -0.3549  1.2197  -0.2910  0.7729  -2.8394   2.1296    
## opioidSulfentanil     0.4814  0.5882   0.8185  0.4191  -0.7166   1.6795    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for neuromuscular blocking agent used
meta::metareg(mbin_difficult_random,blocker)
## 
## Mixed-Effects Model (k = 36; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.4075 (SE = 0.1257)
## tau (square root of estimated tau^2 value):             0.6384
## I^2 (residual heterogeneity / unaccounted variability): 83.65%
## H^2 (unaccounted variability / sampling variability):   6.12
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 30) = 99.8246, p-val < .0001
## 
## Test of Moderators (coefficients 2:6):
## F(df1 = 5, df2 = 30) = 0.8840, p-val = 0.5039
## 
## Model Results:
## 
##                         estimate      se     tval    pval    ci.lb   ci.ub 
## intrcpt                  -0.4972  0.2744  -1.8119  0.0800  -1.0576  0.0632  . 
## blockerAtracurium        -0.5093  0.3640  -1.3991  0.1720  -1.2528  0.2342    
## blockerCisatracurium      0.0664  0.6956   0.0955  0.9246  -1.3542  1.4870    
## blockerRocuronium        -0.1397  0.3911  -0.3571  0.7235  -0.9383  0.6590    
## blockerSuccinylcholine    0.1148  0.4243   0.2705  0.7886  -0.7518  0.9814    
## blockerVecuronium        -0.4800  0.3805  -1.2617  0.2168  -1.2570  0.2970    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for scale definition
meta::metareg(mbin_difficult_random,def.scale)
## 
## Mixed-Effects Model (k = 36; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.3914 (SE = 0.1286)
## tau (square root of estimated tau^2 value):             0.6256
## I^2 (residual heterogeneity / unaccounted variability): 91.32%
## H^2 (unaccounted variability / sampling variability):   11.51
## R^2 (amount of heterogeneity accounted for):            1.69%
## 
## Test for Residual Heterogeneity:
## QE(df = 26) = 196.3464, p-val < .0001
## 
## Test of Moderators (coefficients 2:10):
## F(df1 = 9, df2 = 26) = 0.9500, p-val = 0.5010
## 
## Model Results:
## 
##                                estimate      se     tval    pval    ci.lb 
## intrcpt                         -2.2280  0.7807  -2.8537  0.0084  -3.8329 
## def.scaleEase of intubation      1.4343  1.0241   1.4006  0.1732  -0.6707 
## def.scaleease of intubation\n    1.5349  1.2379   1.2399  0.2261  -1.0097 
## def.scaleIDS                     1.4625  0.7922   1.8462  0.0763  -0.1658 
## def.scaleKaur own                1.0930  1.0291   1.0621  0.2979  -1.0223 
## def.scaleKulkarni own            1.8226  1.2569   1.4500  0.1590  -0.7611 
## def.scaleKurnarz own             2.5157  1.1802   2.1317  0.0427   0.0899 
## def.scaleLascarrou own           2.2226  1.0238   2.1709  0.0393   0.1182 
## def.scaleScale                   2.4512  1.1312   2.1668  0.0396   0.1259 
## def.scaleScale 1-3               2.0093  1.0977   1.8304  0.0787  -0.2471 
##                                  ci.ub 
## intrcpt                        -0.6232  ** 
## def.scaleEase of intubation     3.5393     
## def.scaleease of intubation\n   4.0794     
## def.scaleIDS                    3.0908   . 
## def.scaleKaur own               3.2084     
## def.scaleKulkarni own           4.4062     
## def.scaleKurnarz own            4.9416   * 
## def.scaleLascarrou own          4.3271   * 
## def.scaleScale                  4.7764   * 
## def.scaleScale 1-3              4.2658   . 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
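Since nine univariable meta-regressions were run on the same outcome, adjusting their omnibus moderator tests for multiple testing is a prudent side-check. A minimal sketch with the p-values transcribed from the outputs above:

#Omnibus (Test of Moderators) p-values from the nine univariable meta-regressions above, Holm-adjusted for multiplicity
pvals<-c(population=0.5613,experience=0.4058,technique=0.3173,set=0.4927,nature=0.4804,inducer=0.6732,opioid=0.7977,blocker=0.5039,def.scale=0.5010)
p.adjust(pvals,method="holm")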
#Multiple Meta-regression for Difficult Intubation
str(first)
## 'data.frame':    110 obs. of  18 variables:
##  $ id          : int  1 2 3 4 6 7 8 11 14 15 ...
##  $ author      : Factor w/ 100 levels "Abdallah","Abdelgalel",..: 1 1 2 3 4 5 5 6 7 8 ...
##  $ year        : int  2011 2019 2018 2015 2020 2016 2015 2015 2016 2012 ...
##  $ population  : Factor w/ 5 levels "Elderly","General",..: 4 2 2 2 3 3 2 3 2 2 ...
##  $ predicted   : Factor w/ 3 levels "","Difficult",..: 2 3 2 3 2 2 3 2 3 2 ...
##  $ set         : Factor w/ 5 levels "Emergency department",..: 4 4 2 4 4 4 4 4 4 4 ...
##  $ nature      : Factor w/ 3 levels "","Elective",..: 2 2 3 2 2 2 2 2 2 1 ...
##  $ technique   : Factor w/ 4 levels "","Rapid Sequence Induction",..: 1 3 2 3 3 1 3 3 3 1 ...
##  $ experience  : Factor w/ 3 levels "","Experienced",..: 3 2 2 2 2 2 2 2 3 1 ...
##  $ intervention: Factor w/ 1 level "Videolaryngoscope": 1 1 1 1 1 1 1 1 1 1 ...
##  $ reference   : Factor w/ 1 level "Macintosh": 1 1 1 1 1 1 1 1 1 1 ...
##  $ inducer     : Factor w/ 9 levels "","Etomidate",..: 1 4 6 4 4 1 4 4 4 1 ...
##  $ opioid      : Factor w/ 6 levels "","Fentanyl",..: 1 2 2 2 2 1 1 2 5 1 ...
##  $ blocker     : Factor w/ 8 levels "","Atracurium",..: 1 3 4 4 8 1 4 4 4 1 ...
##  $ ffirst.e1   : int  7 1 5 0 0 0 0 1 25 3 ...
##  $ ffirst.t1   : int  50 35 80 40 40 78 25 45 64 25 ...
##  $ ffirst.e2   : int  4 2 11 1 1 0 0 6 6 9 ...
##  $ ffirst.t2   : int  49 35 40 40 40 96 25 45 22 25 ...
model_difficult<-metafor::rma.uni(ai=fdif.e1,n1i = fdif.t1,ci=fdif.e2,n2i = fdif.t2,data = difficult,method = "ML", measure = "RR", mods = ~experience+population+inducer+def.scale,test = "knha")
## Warning in metafor::rma.uni(ai = fdif.e1, n1i = fdif.t1, ci = fdif.e2, n2i =
## fdif.t2, : Redundant predictors dropped from the model.
model_difficult
## 
## Mixed-Effects Model (k = 37; tau^2 estimator: ML)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.0861 (SE = 0.0447)
## tau (square root of estimated tau^2 value):             0.2934
## I^2 (residual heterogeneity / unaccounted variability): 51.95%
## H^2 (unaccounted variability / sampling variability):   2.08
## R^2 (amount of heterogeneity accounted for):            61.84%
## 
## Test for Residual Heterogeneity:
## QE(df = 21) = 67.2893, p-val < .0001
## 
## Test of Moderators (coefficients 2:16):
## F(df1 = 15, df2 = 21) = 0.7718, p-val = 0.6925
## 
## Model Results:
## 
##                                estimate      se     tval    pval    ci.lb 
## intrcpt                         -2.8360  2.3831  -1.1900  0.2473  -7.7920 
## experienceExperienced           -0.4050  0.3759  -1.0772  0.2936  -1.1868 
## experienceNon-experienced        0.0277  0.6576   0.0421  0.9668  -1.3400 
## populationElderly                3.8950  2.6295   1.4813  0.1534  -1.5733 
## populationGeneral                1.1510  2.1352   0.5391  0.5955  -3.2893 
## populationNeck Immobilization    1.2235  2.1446   0.5705  0.5744  -3.2365 
## populationObese                  2.3053  2.2080   1.0440  0.3083  -2.2865 
## inducerPropofol                 -0.3663  0.3210  -1.1410  0.2667  -1.0339 
## inducerPropofol/Thiopental       1.6845  1.6562   1.0171  0.3207  -1.7597 
## def.scaleEase of intubation      0.5083  1.3477   0.3771  0.7099  -2.2945 
## def.scaleease of intubation\n    1.7631  1.5994   1.1023  0.2828  -1.5631 
## def.scaleIDS                     1.5929  1.0510   1.5156  0.1445  -0.5928 
## def.scaleKaur own                1.3213  1.2329   1.0717  0.2960  -1.2427 
## def.scaleLascarrou own           1.6520  1.3978   1.1818  0.2505  -1.2549 
## def.scaleScale                   2.6794  1.4156   1.8928  0.0722  -0.2644 
## def.scaleScale 1-3               1.8050  1.4740   1.2245  0.2343  -1.2604 
##                                 ci.ub 
## intrcpt                        2.1200    
## experienceExperienced          0.3769    
## experienceNon-experienced      1.3953    
## populationElderly              9.3632    
## populationGeneral              5.5913    
## populationNeck Immobilization  5.6836    
## populationObese                6.8970    
## inducerPropofol                0.3013    
## inducerPropofol/Thiopental     5.1288    
## def.scaleEase of intubation    3.3111    
## def.scaleease of intubation\n  5.0894    
## def.scaleIDS                   3.7786    
## def.scaleKaur own              3.8853    
## def.scaleLascarrou own         4.5589    
## def.scaleScale                 5.6233  . 
## def.scaleScale 1-3             4.8704    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
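The reported R^2 of 61.84% can be cross-checked against an intercept-only refit of the same data, since metafor computes R^2 as the proportional reduction in tau^2 relative to the null model. A minimal sketch (the null model is our addition):

#Pseudo R^2 = (tau^2_null - tau^2_model)/tau^2_null via an intercept-only ML refit
model_null<-metafor::rma.uni(ai=fdif.e1,n1i=fdif.t1,ci=fdif.e2,n2i=fdif.t2,data=difficult,method="ML",measure="RR")
round(100*(1-model_difficult$tau2/model_null$tau2),2) #should be close to 61.84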
metafor::permutest(model_difficult)
## Running 1000 iterations for approximate permutation test.
## 
##   (permutation-test progress bar omitted; one iteration stopped with:
##   "Error in rma.uni(...): Fisher scoring algorithm did not converge.
##   See 'help(rma)' for possible remedies.")
  |=====================                                                 |  30%
  |                                                                            
  |=====================                                                 |  31%
  |                                                                            
  |======================                                                |  31%
  |                                                                            
  |======================                                                |  32%
  |                                                                            
  |=======================                                               |  32%
  |                                                                            
  |=======================                                               |  33%
  |                                                                            
  |=======================                                               |  34%
  |                                                                            
  |========================                                              |  34%
  |                                                                            
  |========================                                              |  35%
  |                                                                            
  |=========================                                             |  35%
  |                                                                            
  |=========================                                             |  36%
  |                                                                            
  |==========================                                            |  36%
  |                                                                            
  |==========================                                            |  37%
  |                                                                            
  |==========================                                            |  38%
  |                                                                            
  |===========================                                           |  38%
  |                                                                            
  |===========================                                           |  39%
  |                                                                            
  |============================                                          |  39%
  |                                                                            
  |============================                                          |  40%
  |                                                                            
  |============================                                          |  41%
  |                                                                            
  |=============================                                         |  41%
  |                                                                            
  |=============================                                         |  42%
  |                                                                            
  |==============================                                        |  42%
  |                                                                            
  |==============================                                        |  43%
  |                                                                            
  |==============================                                        |  44%
  |                                                                            
  |===============================                                       |  44%
  |                                                                            
  |===============================                                       |  45%
  |                                                                            
  |================================                                      |  45%
  |                                                                            
  |================================                                      |  46%
  |                                                                            
  |=================================                                     |  46%
  |                                                                            
  |=================================                                     |  47%
  |                                                                            
  |=================================                                     |  48%
  |                                                                            
  |==================================                                    |  48%
  |                                                                            
  |==================================                                    |  49%
  |                                                                            
  |===================================                                   |  49%
  |                                                                            
  |===================================                                   |  50%
  |                                                                            
  |===================================                                   |  51%
  |                                                                            
  |====================================                                  |  51%
  |                                                                            
  |====================================                                  |  52%
  |                                                                            
  |=====================================                                 |  52%
  |                                                                            
  |=====================================                                 |  53%
  |                                                                            
  |=====================================                                 |  54%
  |                                                                            
  |======================================                                |  54%
  |                                                                            
  |======================================                                |  55%
  |                                                                            
  |=======================================                               |  55%
  |                                                                            
  |=======================================                               |  56%
  |                                                                            
  |========================================                              |  56%
  |                                                                            
  |========================================                              |  57%
  |                                                                            
  |========================================                              |  58%
  |                                                                            
  |=========================================                             |  58%
  |                                                                            
  |=========================================                             |  59%
  |                                                                            
  |==========================================                            |  59%
  |                                                                            
  |==========================================                            |  60%
  |                                                                            
  |==========================================                            |  61%
  |                                                                            
  |===========================================                           |  61%
  |                                                                            
  |===========================================                           |  62%
  |                                                                            
  |============================================                          |  62%
  |                                                                            
  |============================================                          |  63%
  |                                                                            
  |============================================                          |  64%
  |                                                                            
  |=============================================                         |  64%
  |                                                                            
  |=============================================                         |  65%
  |                                                                            
  |==============================================                        |  65%
  |                                                                            
  |==============================================                        |  66%
  |                                                                            
  |===============================================                       |  66%
  |                                                                            
  |===============================================                       |  67%
  |                                                                            
  |===============================================                       |  68%
  |                                                                            
  |================================================                      |  68%
  |                                                                            
  |================================================                      |  69%
  |                                                                            
  |=================================================                     |  69%
  |                                                                            
  |=================================================                     |  70%
  |                                                                            
  |=================================================                     |  71%
  |                                                                            
  |==================================================                    |  71%
  |                                                                            
  |==================================================                    |  72%
  |                                                                            
  |===================================================                   |  72%
  |                                                                            
  |===================================================                   |  73%
  |                                                                            
  |===================================================                   |  74%
  |                                                                            
  |====================================================                  |  74%
  |                                                                            
  |====================================================                  |  75%
  |                                                                            
  |=====================================================                 |  75%
  |                                                                            
  |=====================================================                 |  76%Error in rma.uni(x$yi, x$vi, weights = x$weights, mods = cbind(X[sample(x$k),  : 
##   Fisher scoring algorithm did not converge. See 'help(rma)' for possible remedies.
## 
  |                                                                            
  |======================================================                |  76%
  |                                                                            
  |======================================================                |  77%
  |                                                                            
  |======================================================                |  78%
  |                                                                            
  |=======================================================               |  78%
  |                                                                            
  |=======================================================               |  79%
  |                                                                            
  |========================================================              |  79%
  |                                                                            
  |========================================================              |  80%
  |                                                                            
  |========================================================              |  81%
  |                                                                            
  |=========================================================             |  81%
  |                                                                            
  |=========================================================             |  82%
  |                                                                            
  |==========================================================            |  82%
  |                                                                            
  |==========================================================            |  83%
  |                                                                            
  |==========================================================            |  84%
  |                                                                            
  |===========================================================           |  84%
  |                                                                            
  |===========================================================           |  85%
  |                                                                            
  |============================================================          |  85%
  |                                                                            
  |============================================================          |  86%
  |                                                                            
  |=============================================================         |  86%
  |                                                                            
  |=============================================================         |  87%
  |                                                                            
  |=============================================================         |  88%
  |                                                                            
  |==============================================================        |  88%
  |                                                                            
  |==============================================================        |  89%
  |                                                                            
  |===============================================================       |  89%
  |                                                                            
  |===============================================================       |  90%
  |                                                                            
  |===============================================================       |  91%
  |                                                                            
  |================================================================      |  91%
  |                                                                            
  |================================================================      |  92%
  |                                                                            
  |=================================================================     |  92%
  |                                                                            
  |=================================================================     |  93%
  |                                                                            
  |=================================================================     |  94%
  |                                                                            
  |==================================================================    |  94%
  |                                                                            
  |==================================================================    |  95%
  |                                                                            
  |===================================================================   |  95%
  |                                                                            
  |===================================================================   |  96%
  |                                                                            
  |====================================================================  |  96%
  |                                                                            
  |====================================================================  |  97%
  |                                                                            
  |====================================================================  |  98%
  |                                                                            
  |===================================================================== |  98%
  |                                                                            
  |===================================================================== |  99%
  |                                                                            
  |======================================================================|  99%
  |                                                                            
  |======================================================================| 100%
## 
## Test of Moderators (coefficients 2:16):
## F(df1 = 15, df2 = 21) = 0.7718, p-val* = 0.7910
## 
## Model Results:
## 
##                                estimate      se     tval   pval*    ci.lb 
## intrcpt                         -2.8360  2.3831  -1.1900  0.3720  -7.7920 
## experienceExperienced           -0.4050  0.3759  -1.0772  0.3470  -1.1868 
## experienceNon-experienced        0.0277  0.6576   0.0421  0.9720  -1.3400 
## populationElderly                3.8950  2.6295   1.4813  0.1510  -1.5733 
## populationGeneral                1.1510  2.1352   0.5391  0.6020  -3.2893 
## populationNeck Immobilization    1.2235  2.1446   0.5705  0.5910  -3.2365 
## populationObese                  2.3053  2.2080   1.0440  0.3180  -2.2865 
## inducerPropofol                 -0.3663  0.3210  -1.1410  0.3370  -1.0339 
## inducerPropofol/Thiopental       1.6845  1.6562   1.0171  0.3750  -1.7597 
## def.scaleEase of intubation      0.5083  1.3477   0.3771  0.7180  -2.2945 
## def.scaleease of intubation\n    1.7631  1.5994   1.1023  0.2850  -1.5631 
## def.scaleIDS                     1.5929  1.0510   1.5156  0.2180  -0.5928 
## def.scaleKaur own                1.3213  1.2329   1.0717  0.3360  -1.2427 
## def.scaleLascarrou own           1.6520  1.3978   1.1818  0.2300  -1.2549 
## def.scaleScale                   2.6794  1.4156   1.8928  0.0670  -0.2644 
## def.scaleScale 1-3               1.8050  1.4740   1.2245  0.2490  -1.2604 
##                                 ci.ub 
## intrcpt                        2.1200    
## experienceExperienced          0.3769    
## experienceNon-experienced      1.3953    
## populationElderly              9.3632    
## populationGeneral              5.5913    
## populationNeck Immobilization  5.6836    
## populationObese                6.8970    
## inducerPropofol                0.3013    
## inducerPropofol/Thiopental     5.1288    
## def.scaleEase of intubation    3.3111    
## def.scaleease of intubation\n  5.0894    
## def.scaleIDS                   3.7786    
## def.scaleKaur own              3.8853    
## def.scaleLascarrou own         4.5589    
## def.scaleScale                 5.6233  . 
## def.scaleScale 1-3             4.8704    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
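The error printed during the permutation run above means the Fisher scoring algorithm failed to converge for one permuted dataset. A minimal sketch of a possible remedy, assuming the full moderator model fitted earlier (here given the stand-in name model_difficult.full, which is not a name from the original analysis): relax the optimizer via metafor's control argument and rerun the permutation test.

#Hedged sketch, not part of the original analysis: refit the full moderator
#model with a higher iteration limit and a smaller step size, then permute again
model_difficult.full<-metafor::rma.uni(ai=fdif.e1,n1i = fdif.t1,ci=fdif.e2,n2i = fdif.t2,data = difficult,method = "ML", measure = "RR", mods = ~experience+population+inducer+def.scale,test = "knha",control = list(maxiter = 1000, stepadj = 0.5))
metafor::permutest(model_difficult.full, iter = 1000)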
model_difficult.2<-metafor::rma.uni(ai=fdif.e1,n1i = fdif.t1,ci=fdif.e2,n2i = fdif.t2,data = difficult,method = "ML", measure = "RR", mods = ~def.scale,test = "knha")
model_difficult.2
## 
## Mixed-Effects Model (k = 37; tau^2 estimator: ML)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.1797 (SE = 0.0742)
## tau (square root of estimated tau^2 value):             0.4239
## I^2 (residual heterogeneity / unaccounted variability): 82.29%
## H^2 (unaccounted variability / sampling variability):   5.65
## R^2 (amount of heterogeneity accounted for):            20.33%
## 
## Test for Residual Heterogeneity:
## QE(df = 27) = 195.3100, p-val < .0001
## 
## Test of Moderators (coefficients 2:10):
## F(df1 = 9, df2 = 27) = 0.8050, p-val = 0.6154
## 
## Model Results:
## 
##                                estimate      se     tval    pval    ci.lb 
## intrcpt                         -2.2362  0.8913  -2.5090  0.0184  -4.0650 
## def.scaleEase of intubation      1.4425  1.0926   1.3202  0.1978  -0.7993 
## def.scaleease of intubation\n    1.5431  1.3899   1.1102  0.2767  -1.3088 
## def.scaleIDS                     1.5348  0.8998   1.7056  0.0996  -0.3115 
## def.scaleKaur own                1.1013  1.0998   1.0013  0.3256  -1.1553 
## def.scaleKulkarni own            1.8308  1.4157   1.2932  0.2069  -1.0741 
## def.scaleKurnarz own             2.5239  1.3110   1.9252  0.0648  -0.1661 
## def.scaleLascarrou own           2.2309  1.0922   2.0425  0.0510  -0.0102 
## def.scaleScale                   2.4594  1.2434   1.9779  0.0582  -0.0920 
## def.scaleScale 1-3               2.0176  1.1968   1.6858  0.1034  -0.4380 
##                                  ci.ub 
## intrcpt                        -0.4075  * 
## def.scaleEase of intubation     3.6843    
## def.scaleease of intubation\n   4.3950    
## def.scaleIDS                    3.3811  . 
## def.scaleKaur own               3.3578    
## def.scaleKulkarni own           4.7356    
## def.scaleKurnarz own            5.2139  . 
## def.scaleLascarrou own          4.4719  . 
## def.scaleScale                  5.0107  . 
## def.scaleScale 1-3              4.4731    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
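The coefficients above are on the log risk-ratio scale. To read them as risk ratios, one can simply exponentiate them; the intercept then gives the RR in the reference def.scale level:

#Model coefficients expressed as risk ratios rather than log risk ratios
exp(coef(model_difficult.2))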
#Subgroup analysis for Difficult Intubation by scale definition
meta::update.meta(mbin_difficult_random, byvar = def.scale, tau.common = FALSE)
##                            RR           95%-CI %W(random)            def.scale
## Agrawal 2020           0.3827 [0.2588; 0.5659]        3.9                  IDS
## Ahmad 2016             1.0000 [0.9774; 1.0231]        4.3                  IDS
## Ali 2017               0.7083 [0.4940; 1.0157]        4.0                  IDS
## Aoi 2010               0.1875 [0.0667; 0.5272]        2.5                  IDS
## Arima 2014             0.6235 [0.3964; 0.9807]        3.8                  IDS
## Barak 2007             0.8036 [0.2655; 2.4320]        2.4            Scale 1-3
## Wasem 2013             0.5000 [0.2530; 0.9882]        3.3                  IDS
## Yao 2015               1.2500 [0.3573; 4.3736]        2.1                Scale
## Bhalla 2018            1.7143 [0.9434; 3.1151]        3.5                  IDS
## Bhandari 2013          0.0435 [0.0062; 0.3067]        1.2                     
## Bharti 2014            0.6107 [0.3509; 1.0628]        3.6                  IDS
## Chandrashekaraiah 2017 0.9565 [0.7140; 1.2815]        4.1                  IDS
## Di Marco 2011          0.6500 [0.3612; 1.1698]        3.5                  IDS
## Hamp 2015              0.2941 [0.0362; 2.3868]        1.1                     
## Hosalli 2017           0.3333 [0.1794; 0.6195]        3.4                  IDS
## Kaur 2020              0.3214 [0.1524; 0.6780]        3.1             Kaur own
## Kim 2013               0.1556 [0.0594; 0.4074]        2.7                  IDS
## Kumar_2 2019           0.1786 [0.0798; 0.3997]        3.0                  IDS
## Kunaz 2016             1.3333 [0.3144; 5.6542]        1.8          Kurnarz own
## Lascarrou 2017         0.9946 [0.4879; 2.0277]        3.2        Lascarrou own
## Maharaj 2006           0.3125 [0.1313; 0.7439]        2.9                  IDS
## Maharaj 2007           0.0714 [0.0103; 0.4930]        1.2                  IDS
## Maharaj 2008           0.2683 [0.1313; 0.5484]        3.2                  IDS
## Malik 2008             0.5513 [0.4261; 0.7133]        4.1                  IDS
## Malik_1 2009           0.4167 [0.2940; 0.5906]        4.0                  IDS
## Malik_2 2009           0.1818 [0.0712; 0.4643]        2.7                  IDS
## Mcelwain 2011          0.7055 [0.5318; 0.9360]        4.1                  IDS
## Kulkarni 2013          0.6667 [0.1198; 3.7087]        1.5         Kulkarni own
## Ndoko 2008             0.0909 [0.0052; 1.6036]        0.7                  IDS
## Pappu 2020             0.2000 [0.0100; 3.9955]        0.6                  IDS
## Pazur 2016             0.8889 [0.4067; 1.9430]        3.1                  IDS
## Reena 2019             0.5000 [0.0959; 2.6074]        1.5 ease of intubation\n
## Ruetzeler 2020         0.4522 [0.2214; 0.9234]        3.2   Ease of intubation
## Tolon 2012             0.2857 [0.1136; 0.7186]        2.8                  IDS
## Vijayakumar 2016       0.7000 [0.2924; 1.6759]        2.9                  IDS
## Yoo 2018               0.3333 [0.0375; 2.9625]        1.0                  IDS
## 
## Number of studies combined: k = 36
## 
##                          RR           95%-CI     t  p-value
## Random effects model 0.4822 [0.3793; 0.6131] -6.17 < 0.0001
## Prediction interval         [0.1308; 1.7778]               
## 
## Quantifying heterogeneity:
##  tau^2 = 0.3981 [0.1156; 0.6790]; tau = 0.6310 [0.3400; 0.8240];
##  I^2 = 84.1% [78.8%; 88.0%]; H = 2.51 [2.17; 2.89]
## 
## Quantifying residual heterogeneity:
##  I^2 = 86.8% [81.9%; 90.3%]; H = 2.75 [2.35; 3.21]
## 
## Test of heterogeneity:
##       Q d.f.  p-value
##  219.74   35 < 0.0001
## 
## Results for subgroups (random effects model):
##                                    k     RR               95%-CI  tau^2    tau
## def.scale =                        2 0.1090 [0.0000; 20326.3719] 0.8428 0.9180
## def.scale = Ease of intubation     1 0.4522 [0.2214;     0.9234]     --     --
## def.scale = ease of intubation\n   1 0.5000 [0.0959;     2.6074]     --     --
## def.scale = IDS                   26 0.4679 [0.3566;     0.6140] 0.3615 0.6013
## def.scale = Kaur own               1 0.3214 [0.1524;     0.6780]     --     --
## def.scale = Kulkarni own           1 0.6667 [0.1198;     3.7087]     --     --
## def.scale = Kurnarz own            1 1.3333 [0.3144;     5.6542]     --     --
## def.scale = Lascarrou own          1 0.9946 [0.4879;     2.0277]     --     --
## def.scale = Scale                  1 1.2500 [0.3573;     4.3736]     --     --
## def.scale = Scale 1-3              1 0.8036 [0.2655;     2.4320]     --     --
##                                       Q   I^2
## def.scale =                        1.71 41.6%
## def.scale = Ease of intubation     0.00    --
## def.scale = ease of intubation\n   0.00    --
## def.scale = IDS                  194.63 87.2%
## def.scale = Kaur own               0.00    --
## def.scale = Kulkarni own           0.00    --
## def.scale = Kurnarz own            0.00    --
## def.scale = Lascarrou own          0.00    --
## def.scale = Scale                  0.00    --
## def.scale = Scale 1-3              0.00    --
## 
## Test for subgroup differences (random effects model):
##                      Q d.f. p-value
## Between groups   12.40    9  0.1915
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
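Note that the first subgroup above (def.scale = , k = 2, with an essentially unbounded confidence interval) pools the two comparisons whose scale definition is empty in the data. A hedged clean-up sketch, assuming the same difficult data frame used above and "Unspecified" as a stand-in label; the meta-analysis object would then need to be refitted before rerunning update.meta():

#Hedged sketch, not part of the original analysis: label the comparisons
#with an empty def.scale so they do not pool into a blank subgroup
difficult$def.scale[difficult$def.scale == "" | is.na(difficult$def.scale)] <- "Unspecified"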
#Funnel Plot for Difficult Intubation
meta::funnel(mbin_difficult_random,ref.triangle=TRUE,contour.levels=c(0.9,0.95,0.99),col.contour=c("darkblue","blue","lightblue"))
meta::metabias(mbin_difficult_random, method.bias = "linreg")
## 
##  Linear regression test of funnel plot asymmetry
## 
## data:  mbin_difficult_random
## t = -6.9546, df = 34, p-value = 5.104e-08
## alternative hypothesis: asymmetry in funnel plot
## sample estimates:
##        bias     se.bias   intercept 
## -2.02001303  0.29045744  0.01886657
dmetar::eggers.test(mbin_difficult_random)
##              Intercept ConfidenceInterval      t p
## Egger's test     -2.02      -2.608--1.432 -6.955 0
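Because log risk ratios and their standard errors are structurally correlated, Egger's regression can be anticonservative for binary outcomes; a score-based test such as Harbord's is a common complement (a sketch, using the "score" option of meta 4.x):

#Hedged complement, not part of the original analysis: Harbord's score-based test
meta::metabias(mbin_difficult_random, method.bias = "score")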

trimfill_difficult<-meta::trimfill(mbin_difficult_random)
trimfill_difficult
##                             RR             95%-CI %W(random)
## Agrawal 2020            0.3827 [0.2588;   0.5659]        2.3
## Ahmad 2016              1.0000 [0.9774;   1.0231]        2.3
## Ali 2017                0.7083 [0.4940;   1.0157]        2.3
## Aoi 2010                0.1875 [0.0667;   0.5272]        2.0
## Arima 2014              0.6235 [0.3964;   0.9807]        2.3
## Barak 2007              0.8036 [0.2655;   2.4320]        1.9
## Wasem 2013              0.5000 [0.2530;   0.9882]        2.2
## Yao 2015                1.2500 [0.3573;   4.3736]        1.8
## Bhalla 2018             1.7143 [0.9434;   3.1151]        2.2
## Bhandari 2013           0.0435 [0.0062;   0.3067]        1.4
## Bharti 2014             0.6107 [0.3509;   1.0628]        2.2
## Chandrashekaraiah 2017  0.9565 [0.7140;   1.2815]        2.3
## Di Marco 2011           0.6500 [0.3612;   1.1698]        2.2
## Hamp 2015               0.2941 [0.0362;   2.3868]        1.3
## Hosalli 2017            0.3333 [0.1794;   0.6195]        2.2
## Kaur 2020               0.3214 [0.1524;   0.6780]        2.1
## Kim 2013                0.1556 [0.0594;   0.4074]        2.0
## Kumar_2 2019            0.1786 [0.0798;   0.3997]        2.1
## Kunaz 2016              1.3333 [0.3144;   5.6542]        1.7
## Lascarrou 2017          0.9946 [0.4879;   2.0277]        2.1
## Maharaj 2006            0.3125 [0.1313;   0.7439]        2.1
## Maharaj 2007            0.0714 [0.0103;   0.4930]        1.4
## Maharaj 2008            0.2683 [0.1313;   0.5484]        2.1
## Malik 2008              0.5513 [0.4261;   0.7133]        2.3
## Malik_1 2009            0.4167 [0.2940;   0.5906]        2.3
## Malik_2 2009            0.1818 [0.0712;   0.4643]        2.0
## Mcelwain 2011           0.7055 [0.5318;   0.9360]        2.3
## Kulkarni 2013           0.6667 [0.1198;   3.7087]        1.5
## Ndoko 2008              0.0909 [0.0052;   1.6036]        0.9
## Pappu 2020              0.2000 [0.0100;   3.9955]        0.9
## Pazur 2016              0.8889 [0.4067;   1.9430]        2.1
## Reena 2019              0.5000 [0.0959;   2.6074]        1.6
## Ruetzeler 2020          0.4522 [0.2214;   0.9234]        2.1
## Tolon 2012              0.2857 [0.1136;   0.7186]        2.0
## Vijayakumar 2016        0.7000 [0.2924;   1.6759]        2.1
## Yoo 2018                0.3333 [0.0375;   2.9625]        1.2
## Filled: Malik_1 2009    2.3425 [1.6527;   3.3200]        2.3
## Filled: Agrawal 2020    2.5503 [1.7247;   3.7709]        2.3
## Filled: Yoo 2018        2.9281 [0.3295;  26.0234]        1.2
## Filled: Hosalli 2017    2.9281 [1.5755;   5.4419]        2.2
## Filled: Kaur 2020       3.0365 [1.4395;   6.4054]        2.1
## Filled: Maharaj 2006    3.1233 [1.3120;   7.4352]        2.1
## Filled: Hamp 2015       3.3185 [0.4089;  26.9303]        1.3
## Filled: Tolon 2012      3.4161 [1.3583;   8.5915]        2.0
## Filled: Maharaj 2008    3.6379 [1.7798;   7.4357]        2.1
## Filled: Pappu 2020      4.8801 [0.2443;  97.4913]        0.9
## Filled: Aoi 2010        5.2055 [1.8514;  14.6356]        2.0
## Filled: Malik_2 2009    5.3681 [2.1022;  13.7078]        2.0
## Filled: Kumar_2 2019    5.4657 [2.4416;  12.2355]        2.1
## Filled: Kim 2013        6.2744 [2.3959;  16.4315]        2.0
## Filled: Ndoko 2008     10.7362 [0.6086; 189.3836]        0.9
## Filled: Maharaj 2007   13.6643 [1.9799;  94.3064]        1.4
## Filled: Bhandari 2013  22.4485 [3.1825; 158.3466]        1.4
## 
## Number of studies combined: k = 53 (with 17 added studies)
## 
##                          RR           95%-CI     t p-value
## Random effects model 0.8946 [0.6298; 1.2707] -0.64  0.5271
## Prediction interval         [0.0809; 9.8889]              
## 
## Quantifying heterogeneity:
##  tau^2 = 1.4019 [0.7663; 2.2272]; tau = 1.1840 [0.8754; 1.4924];
##  I^2 = 86.7% [83.4%; 89.4%]; H = 2.75 [2.46; 3.07]
## 
## Test of heterogeneity:
##       Q d.f.  p-value
##  391.91   52 < 0.0001
## 
## Details on meta-analytical method:
## - Inverse variance method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Trim-and-fill method to adjust for funnel plot asymmetry
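The 17 imputed comparisons can be inspected directly on a funnel plot of the trim-and-fill object (a minimal sketch):

#Funnel plot including the "Filled" studies
meta::funnel(trimfill_difficult)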

Difficult Laryngoscopy by Cormack and Lehane Classification

cormack<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Pairwise VLs Cormack.csv")
length(cormack$fcormack.e1)
## [1] 92
#Number of comparisons with zero events in both arms
cormack_zeros<-dplyr::filter(cormack,cormack$fcormack.e1==0 & cormack$fcormack.e2==0)
length(cormack_zeros$fcormack.e1)
## [1] 12
#Table for Meta-analysis of Cormack and Lehane
cormack_analysis<-dplyr::filter(cormack,cormack$fcormack.e1>0 | cormack$fcormack.e2>0)

#Number of comparisons and patients meta-analyzed for Cormack and Lehane
length(cormack_analysis$fcormack.e1)
## [1] 80
sum(cormack_analysis$fcormack.t1,cormack_analysis$fcormack.t2)
## [1] 9324
#Meta-analysis for Cormack and Lehane
mbin_cormack_random<-meta::metabin(fcormack.e1,fcormack.t1,fcormack.e2,fcormack.t2,data = cormack_analysis,studlab = paste(author, year),comb.fixed = FALSE,comb.random = TRUE,method.tau = "SJ",hakn = TRUE,prediction = TRUE,incr = 0.5,sm = "RR")
mbin_cormack_random
##                            RR             95%-CI %W(random)
## Abdallah 2011          0.6236 [0.2634;   1.4765]        2.2
## Abdelgalel 2018        0.2083 [0.0788;   0.5506]        2.1
## Agrawal 2020           0.0164 [0.0010;   0.2591]        0.8
## Ahmad 2016             0.0483 [0.0121;   0.1920]        1.7
## Akbar 2015             0.6667 [0.1169;   3.8014]        1.4
## Al-Ghamdi 2016         0.4583 [0.1112;   1.8894]        1.6
## Ali 2017               0.2500 [0.0578;   1.0814]        1.6
## Altun 2018             1.0000 [0.2637;   3.7921]        1.7
## Andersen 2011          0.1429 [0.0342;   0.5962]        1.6
## Aoi 2010               0.0526 [0.0033;   0.8362]        0.8
## Aqil 2016              0.2500 [0.0292;   2.1399]        1.1
## Aqil 2017              0.2143 [0.0644;   0.7130]        1.9
## Arici 2014             5.0000 [0.2477; 100.9273]        0.7
## Aziz 2012              0.3523 [0.1776;   0.6991]        2.4
## Barak 2007             0.0865 [0.0049;   1.5111]        0.7
## Bashir 2020            0.2000 [0.0099;   4.0371]        0.7
## Colak 2019             0.7000 [0.2924;   1.6759]        2.2
## El-Tahan 2018          1.3465 [0.4885;   3.7120]        2.1
## Huang 2020             0.0342 [0.0020;   0.5786]        0.7
## Bhat 2015              0.2667 [0.0951;   0.7476]        2.0
## Bilehjani 2009         0.3169 [0.0133;   7.5444]        0.6
## Blajic 2019            0.4958 [0.1824;   1.3478]        2.1
## Cakir 2020             1.0000 [0.6550;   1.5266]        2.6
## Chandrashekaraiah 2017 0.5000 [0.0989;   2.5270]        1.5
## Colak 2015             0.1043 [0.0125;   0.8678]        1.1
## Dey 2020               0.3086 [0.1602;   0.5947]        2.4
## Erden 2010             0.3333 [0.0387;   2.8745]        1.1
## Erturk 2015            0.2500 [0.0566;   1.1051]        1.6
## Gao 2018               0.9491 [0.5035;   1.7891]        2.4
## Gupta 2020             0.0476 [0.0029;   0.7857]        0.7
## Hamp 2015              0.1302 [0.0075;   2.2530]        0.7
## Hosalli 2017           0.1429 [0.0077;   2.6497]        0.7
## Hu 2017                0.1111 [0.0061;   2.0369]        0.7
## Ilyas 2014             0.0169 [0.0010;   0.2744]        0.8
## Jungbauer 2009         0.2778 [0.1460;   0.5286]        2.4
## Kaur 2020              0.0265 [0.0016;   0.4436]        0.7
## Khan 2008              0.1429 [0.0078;   2.6093]        0.7
## Kido 2015              0.0909 [0.0053;   1.5599]        0.7
## Kim 2013               0.0337 [0.0021;   0.5303]        0.8
## Kleine-Brueggeney 2017 0.2520 [0.1949;   0.3259]        2.7
## Kunaz 2016             0.1111 [0.0061;   2.0105]        0.7
## Laosuwan 2015          0.1429 [0.0083;   2.4653]        0.7
## Lascarrou 2017         0.5486 [0.3213;   0.9367]        2.5
## Lim 2005               0.0588 [0.0035;   0.9748]        0.7
## Lin 2012               0.1976 [0.0236;   1.6548]        1.1
## Liu 2016               0.0674 [0.0039;   1.1627]        0.7
## Liu 2019               0.0076 [0.0005;   0.1219]        0.8
## Maharaj 2006           0.3333 [0.0141;   7.8648]        0.6
## Maharaj 2007           0.0667 [0.0041;   1.0925]        0.8
## Maharaj 2008           0.0286 [0.0018;   0.4441]        0.8
## Mahmood 2015           0.2000 [0.0100;   3.9955]        0.7
## Malik 2008             0.1333 [0.0273;   0.6518]        1.5
## Malik1 2009            0.0297 [0.0018;   0.4944]        0.7
## Malik2 2009            0.2000 [0.0100;   3.9955]        0.7
## Mcelwain 2011          0.2138 [0.0730;   0.6260]        2.0
## Kulkarni 2013          0.3333 [0.0141;   7.8648]        0.6
## Inangil 2018           0.1111 [0.0062;   1.9882]        0.7
## Jafra 2018             0.1111 [0.0061;   2.0369]        0.7
## Ndoko 2008             0.0435 [0.0026;   0.7193]        0.7
## Ninan 2016             0.3333 [0.0141;   7.8648]        0.6
## Nishiyama 2011         1.6509 [0.6054;   4.5022]        2.1
## Parasa 2016            0.2500 [0.0296;   2.1081]        1.1
## Pazur 2016             0.2000 [0.0101;   3.9701]        0.7
## Ranieri 2012           0.0628 [0.0037;   1.0770]        0.7
## Reena 2019             0.2222 [0.0505;   0.9774]        1.6
## Risse 2020             0.3043 [0.0129;   7.2021]        0.6
## Ruetzeler 2020         0.2864 [0.0826;   0.9927]        1.8
## Shah 2016              0.3224 [0.0137;   7.6018]        0.6
## Sun 2005               0.8333 [0.4453;   1.5594]        2.5
## Takenaka 2011          0.1944 [0.0097;   3.9039]        0.7
## Teoh 2010              0.2000 [0.0487;   0.8219]        1.6
## Toker 2019             0.1538 [0.0366;   0.6469]        1.6
## Tolon 2012             0.2000 [0.0102;   3.9140]        0.7
## Varsha 2019            0.0435 [0.0027;   0.7100]        0.8
## Vijayakumar 2016       0.0400 [0.0024;   0.6556]        0.8
## Walker 2009            3.0000 [0.1247;  72.1913]        0.6
## Yoo 2018               0.1000 [0.0140;   0.7162]        1.2
## Yumul 2016             0.1253 [0.0430;   0.3648]        2.0
## Cavus 2011             0.7857 [0.3244;   1.9033]        2.2
## Lee 2012               0.0375 [0.0021;   0.6733]        0.7
## 
## Number of studies combined: k = 80
## 
##                          RR           95%-CI      t  p-value
## Random effects model 0.2412 [0.1883; 0.3090] -11.43 < 0.0001
## Prediction interval         [0.0423; 1.3769]                
## 
## Quantifying heterogeneity:
##  tau^2 = 0.7501 [0.1422; 0.8111]; tau = 0.8661 [0.3771; 0.9006];
##  I^2 = 51.0% [36.6%; 62.1%]; H = 1.43 [1.26; 1.63]
## 
## Test of heterogeneity:
##       Q d.f.  p-value
##  161.26   79 < 0.0001
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
#Estimated probability of difficult laryngoscopy (C&L≥3) with Macintosh
meta::metaprop(event = fcormack.e2,n = fcormack.t2 ,studlab = paste(author,year),data = cormack,method = "GLMM",sm = "PLOGIT",comb.fixed = FALSE,comb.random = TRUE,hakn = TRUE)
##                        proportion           95%-CI
## Abdallah 2011              0.2245 [0.1177; 0.3662]
## Abdelgalel 2018            0.3000 [0.1656; 0.4653]
## Abdelgawad 2015            0.0000 [0.0000; 0.1684]
## Aggarwal 2019              0.0000 [0.0000; 0.0711]
## Agrawal 2020               0.7500 [0.5880; 0.8731]
## Ahmad 2016                 0.5312 [0.4266; 0.6339]
## Akbar 2015                 0.0667 [0.0140; 0.1827]
## Al-Ghamdi 2016             0.1364 [0.0291; 0.3491]
## Ali 2017                   0.2667 [0.1228; 0.4589]
## Altun 2018                 0.0750 [0.0157; 0.2039]
## Andersen 2011              0.2800 [0.1623; 0.4249]
## Aoi 2010                   0.5294 [0.2781; 0.7702]
## Aqil 2016                  0.1000 [0.0279; 0.2366]
## Aqil 2017                  0.2000 [0.1139; 0.3127]
## Arici 2014                 0.0000 [0.0000; 0.0881]
## Arslan 2017                0.0000 [0.0000; 0.0881]
## Aziz 2012                  0.1905 [0.1305; 0.2634]
## Bakshi 2019                0.0000 [0.0000; 0.0974]
## Barak 2007                 0.0667 [0.0249; 0.1395]
## Bashir 2020                0.0500 [0.0061; 0.1692]
## Colak 2019                 0.2222 [0.1120; 0.3709]
## El-Tahan 2018              0.1250 [0.0351; 0.2899]
## Huang 2020                 0.2333 [0.0993; 0.4228]
## Wasem 2013                 0.0000 [0.0000; 0.1157]
## Yao 2015                   0.0000 [0.0000; 0.0740]
## Bhalla 2018                0.0000 [0.0000; 0.2180]
## Bhat 2015                  0.3000 [0.1786; 0.4461]
## Bilehjani 2009             0.0263 [0.0007; 0.1381]
## Blajic 2019                0.1186 [0.0491; 0.2293]
## Cakir 2020                 0.5806 [0.3908; 0.7545]
## Caparlar 2019              0.0000 [0.0000; 0.0903]
## Chandrashekaraiah 2017     0.1333 [0.0376; 0.3072]
## Colak 2015                 0.1020 [0.0340; 0.2223]
## Dey 2020                   0.3000 [0.2163; 0.3948]
## Erden 2010                 0.1875 [0.0405; 0.4565]
## Erturk 2015                0.2000 [0.0905; 0.3565]
## Gao 2018                   0.1951 [0.1158; 0.2974]
## Gupta 2020                 0.2500 [0.1269; 0.4120]
## Hamp 2015                  0.2000 [0.0573; 0.4366]
## Hosalli 2017               0.1000 [0.0211; 0.2653]
## Hu 2017                    0.0400 [0.0110; 0.0993]
## Ilyas 2014                 0.2283 [0.1586; 0.3112]
## Jungbauer 2009             0.3600 [0.2664; 0.4621]
## Kaur 2020                  0.2250 [0.1084; 0.3845]
## Khan 2008                  0.1364 [0.0291; 0.3491]
## Kido 2015                  0.2000 [0.0683; 0.4070]
## Kim 2013                   0.6522 [0.4273; 0.8362]
## Kleine-Brueggeney 2017     0.6833 [0.5922; 0.7652]
## Kunaz 2016                 0.0800 [0.0222; 0.1923]
## Laosuwan 2015              0.2727 [0.0602; 0.6097]
## Lascarrou 2017             0.1864 [0.1319; 0.2517]
## Lim 2005                   0.2667 [0.1228; 0.4589]
## Lin 2012                   0.0610 [0.0201; 0.1366]
## Liu 2016                   0.0787 [0.0322; 0.1554]
## Liu 2019                   0.3646 [0.2945; 0.4393]
## Maharaj 2006               0.0333 [0.0008; 0.1722]
## Maharaj 2007               0.3500 [0.1539; 0.5922]
## Maharaj 2008               0.8500 [0.6211; 0.9679]
## Mahmood 2015               0.0667 [0.0082; 0.2207]
## Malik 2008                 0.1667 [0.0564; 0.3472]
## Malik1 2009                0.3200 [0.1495; 0.5350]
## Malik2 2009                0.0667 [0.0082; 0.2207]
## Maruyama 2008              0.0000 [0.0000; 0.2646]
## Mcelwain 2011              0.3226 [0.1668; 0.5137]
## Kulkarni 2013              0.0333 [0.0008; 0.1722]
## Inangil 2018               0.1143 [0.0320; 0.2674]
## Ing 2017                   0.0000 [0.0000; 0.2059]
## Jafra 2018                 0.0400 [0.0110; 0.0993]
## Ndoko 2008                 0.2075 [0.1084; 0.3411]
## Ninan 2016                 0.0333 [0.0008; 0.1722]
## Nishiyama 2011             0.1143 [0.0320; 0.2674]
## Parasa 2016                0.1333 [0.0376; 0.3072]
## Pazur 2016                 0.0769 [0.0095; 0.2513]
## Ranieri 2012               0.1094 [0.0451; 0.2125]
## Reena 2019                 0.1800 [0.0858; 0.3144]
## Risse 2020                 0.0323 [0.0008; 0.1670]
## Ruetzeler 2020             0.1587 [0.0788; 0.2726]
## Shah 2016                  0.0345 [0.0009; 0.1776]
## Sun 2005                   0.1800 [0.1103; 0.2695]
## Takenaka 2011              0.0588 [0.0072; 0.1968]
## Teoh 2010                  0.0500 [0.0164; 0.1128]
## Toker 2019                 0.2600 [0.1463; 0.4034]
## Tolon 2012                 0.1000 [0.0123; 0.3170]
## Tsan 2020                  0.0000 [0.0000; 0.0521]
## Varsha 2019                0.3143 [0.1685; 0.4929]
## Vijayakumar 2016           0.2667 [0.1460; 0.4194]
## Walker 2009                0.0000 [0.0000; 0.0596]
## Yoo 2018                   0.4545 [0.2439; 0.6779]
## Yumul 2016                 0.3548 [0.1923; 0.5463]
## Cavus 2011                 0.1400 [0.0582; 0.2674]
## El-Tahan 2017              0.0000 [0.0000; 0.2316]
## Lee 2012                   0.1600 [0.0454; 0.3608]
## 
## Number of studies combined: k = 92
## 
##                      proportion           95%-CI
## Random effects model     0.1265 [0.0973; 0.1630]
## 
## Quantifying heterogeneity:
##  tau^2 = 1.6434; tau = 1.2819; I^2 = 91.0%; H = 3.33
## 
## Test of heterogeneity:
##       Q d.f.  p-value             Test
##  478.45   91 < 0.0001        Wald-type
##  841.89   91 < 0.0001 Likelihood-Ratio
## 
## Details on meta-analytical method:
## - Random intercept logistic regression model
## - Maximum-likelihood estimator for tau^2
## - Hartung-Knapp adjustment for random effects model
## - Logit transformation
## - Clopper-Pearson confidence interval for individual studies
## - Continuity correction of 0.5 in studies with zero cell frequencies
##   (only used to calculate individual study results)
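Taken together, the pooled risk ratio for difficult laryngoscopy (0.2412) and the pooled baseline risk with the Macintosh (0.1265) imply the absolute effect; a back-of-the-envelope sketch using the two estimates printed above:

#Absolute effect implied by the pooled RR and the Mac-DL baseline risk
baseline_risk<-0.1265
rr<-0.2412
round(1000*baseline_risk*(1-rr)) #approximately 96 fewer difficult laryngoscopies per 1000 patients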
#Forest plot for Cormack and Lehane
meta::forest(mbin_cormack_random,sortvar=TE,lab.e="Videolaryngoscopes",lab.c="Macintosh",col.study="black",col.square="black",col.diamond="blue")

#Detecting Outliers for Cormack and Lehane
dmetar::find.outliers(mbin_cormack_random)
## Identified outliers (random-effects model) 
## ------------------------------------------ 
## "El-Tahan 2018", "Cakir 2020", "Gao 2018", "Lascarrou 2017", "Liu 2019", "Nishiyama 2011", "Sun 2005", "Cavus 2011" 
##  
## Results with outliers removed 
## ----------------------------- 
##                            RR             95%-CI %W(random) exclude
## Abdallah 2011          0.6236 [0.2634;   1.4765]        3.0        
## Abdelgalel 2018        0.2083 [0.0788;   0.5506]        2.8        
## Agrawal 2020           0.0164 [0.0010;   0.2591]        0.8        
## Ahmad 2016             0.0483 [0.0121;   0.1920]        2.1        
## Akbar 2015             0.6667 [0.1169;   3.8014]        1.6        
## Al-Ghamdi 2016         0.4583 [0.1112;   1.8894]        2.0        
## Ali 2017               0.2500 [0.0578;   1.0814]        2.0        
## Altun 2018             1.0000 [0.2637;   3.7921]        2.1        
## Andersen 2011          0.1429 [0.0342;   0.5962]        2.0        
## Aoi 2010               0.0526 [0.0033;   0.8362]        0.8        
## Aqil 2016              0.2500 [0.0292;   2.1399]        1.2        
## Aqil 2017              0.2143 [0.0644;   0.7130]        2.4        
## Arici 2014             5.0000 [0.2477; 100.9273]        0.7        
## Aziz 2012              0.3523 [0.1776;   0.6991]        3.4        
## Barak 2007             0.0865 [0.0049;   1.5111]        0.8        
## Bashir 2020            0.2000 [0.0099;   4.0371]        0.7        
## Colak 2019             0.7000 [0.2924;   1.6759]        3.0        
## El-Tahan 2018          1.3465 [0.4885;   3.7120]        0.0       *
## Huang 2020             0.0342 [0.0020;   0.5786]        0.8        
## Bhat 2015              0.2667 [0.0951;   0.7476]        2.7        
## Bilehjani 2009         0.3169 [0.0133;   7.5444]        0.7        
## Blajic 2019            0.4958 [0.1824;   1.3478]        2.7        
## Cakir 2020             1.0000 [0.6550;   1.5266]        0.0       *
## Chandrashekaraiah 2017 0.5000 [0.0989;   2.5270]        1.7        
## Colak 2015             0.1043 [0.0125;   0.8678]        1.2        
## Dey 2020               0.3086 [0.1602;   0.5947]        3.4        
## Erden 2010             0.3333 [0.0387;   2.8745]        1.2        
## Erturk 2015            0.2500 [0.0566;   1.1051]        1.9        
## Gao 2018               0.9491 [0.5035;   1.7891]        0.0       *
## Gupta 2020             0.0476 [0.0029;   0.7857]        0.8        
## Hamp 2015              0.1302 [0.0075;   2.2530]        0.8        
## Hosalli 2017           0.1429 [0.0077;   2.6497]        0.8        
## Hu 2017                0.1111 [0.0061;   2.0369]        0.8        
## Ilyas 2014             0.0169 [0.0010;   0.2744]        0.8        
## Jungbauer 2009         0.2778 [0.1460;   0.5286]        3.4        
## Kaur 2020              0.0265 [0.0016;   0.4436]        0.8        
## Khan 2008              0.1429 [0.0078;   2.6093]        0.8        
## Kido 2015              0.0909 [0.0053;   1.5599]        0.8        
## Kim 2013               0.0337 [0.0021;   0.5303]        0.8        
## Kleine-Brueggeney 2017 0.2520 [0.1949;   0.3259]        4.0        
## Kunaz 2016             0.1111 [0.0061;   2.0105]        0.8        
## Laosuwan 2015          0.1429 [0.0083;   2.4653]        0.8        
## Lascarrou 2017         0.5486 [0.3213;   0.9367]        0.0       *
## Lim 2005               0.0588 [0.0035;   0.9748]        0.8        
## Lin 2012               0.1976 [0.0236;   1.6548]        1.2        
## Liu 2016               0.0674 [0.0039;   1.1627]        0.8        
## Liu 2019               0.0076 [0.0005;   0.1219]        0.0       *
## Maharaj 2006           0.3333 [0.0141;   7.8648]        0.7        
## Maharaj 2007           0.0667 [0.0041;   1.0925]        0.8        
## Maharaj 2008           0.0286 [0.0018;   0.4441]        0.8        
## Mahmood 2015           0.2000 [0.0100;   3.9955]        0.7        
## Malik 2008             0.1333 [0.0273;   0.6518]        1.8        
## Malik1 2009            0.0297 [0.0018;   0.4944]        0.8        
## Malik2 2009            0.2000 [0.0100;   3.9955]        0.7        
## Mcelwain 2011          0.2138 [0.0730;   0.6260]        2.6        
## Kulkarni 2013          0.3333 [0.0141;   7.8648]        0.7        
## Inangil 2018           0.1111 [0.0062;   1.9882]        0.8        
## Jafra 2018             0.1111 [0.0061;   2.0369]        0.8        
## Ndoko 2008             0.0435 [0.0026;   0.7193]        0.8        
## Ninan 2016             0.3333 [0.0141;   7.8648]        0.7        
## Nishiyama 2011         1.6509 [0.6054;   4.5022]        0.0       *
## Parasa 2016            0.2500 [0.0296;   2.1081]        1.2        
## Pazur 2016             0.2000 [0.0101;   3.9701]        0.7        
## Ranieri 2012           0.0628 [0.0037;   1.0770]        0.8        
## Reena 2019             0.2222 [0.0505;   0.9774]        1.9        
## Risse 2020             0.3043 [0.0129;   7.2021]        0.7        
## Ruetzeler 2020         0.2864 [0.0826;   0.9927]        2.3        
## Shah 2016              0.3224 [0.0137;   7.6018]        0.7        
## Sun 2005               0.8333 [0.4453;   1.5594]        0.0       *
## Takenaka 2011          0.1944 [0.0097;   3.9039]        0.7        
## Teoh 2010              0.2000 [0.0487;   0.8219]        2.0        
## Toker 2019             0.1538 [0.0366;   0.6469]        2.0        
## Tolon 2012             0.2000 [0.0102;   3.9140]        0.7        
## Varsha 2019            0.0435 [0.0027;   0.7100]        0.8        
## Vijayakumar 2016       0.0400 [0.0024;   0.6556]        0.8        
## Walker 2009            3.0000 [0.1247;  72.1913]        0.7        
## Yoo 2018               0.1000 [0.0140;   0.7162]        1.4        
## Yumul 2016             0.1253 [0.0430;   0.3648]        2.6        
## Cavus 2011             0.7857 [0.3244;   1.9033]        0.0       *
## Lee 2012               0.0375 [0.0021;   0.6733]        0.8        
## 
## Number of studies combined: k = 72
## 
##                          RR           95%-CI      t  p-value
## Random effects model 0.1985 [0.1592; 0.2476] -14.59 < 0.0001
## Prediction interval         [0.0485; 0.8122]                
## 
## Quantifying heterogeneity:
##  tau^2 = 0.4867 [0.0000; 0.2906]; tau = 0.6976 [0.0000; 0.5391];
##  I^2 = 0.0% [0.0%; 27.6%]; H = 1.00 [1.00; 1.18]
## 
## Test of heterogeneity:
##      Q d.f. p-value
##  70.46   71  0.4959
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
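#Sensitivity of the pooled RR to the continuity correction (sketch, not
#part of the original analysis): update.meta refits the same model with
#the treatment-arm continuity correction ("TACC") in place of the fixed
#0.5 increment used above
mbin_fia_tacc<-update(mbin_fia_fixed,incr = "TACC")
mbin_fia_tacc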
#Influence Analysis for Cormack and Lehane
inf_analysis_cormack<-dmetar::InfluenceAnalysis(mbin_cormack_random,random = TRUE)
## [===========================================================================] DONE
plot(inf_analysis_cormack,"baujat")
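#Other diagnostics from the same InfluenceAnalysis object (sketch; plot
#types as documented for dmetar::InfluenceAnalysis)
plot(inf_analysis_cormack,"influence")
#Leave-one-out results ordered by effect size and by I^2
plot(inf_analysis_cormack,"es")
plot(inf_analysis_cormack,"i2")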

#Meta-regression for Cormack and Lehane

#Controlling for population characteristics (general, obese, neck immobilization, pregnant women, elderly)
meta::metareg(mbin_cormack_random,population)
## 
## Mixed-Effects Model (k = 80; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.6931 (SE = 0.1662)
## tau (square root of estimated tau^2 value):             0.8325
## I^2 (residual heterogeneity / unaccounted variability): 59.00%
## H^2 (unaccounted variability / sampling variability):   2.44
## R^2 (amount of heterogeneity accounted for):            7.59%
## 
## Test for Residual Heterogeneity:
## QE(df = 75) = 132.4061, p-val < .0001
## 
## Test of Moderators (coefficients 2:5):
## F(df1 = 4, df2 = 75) = 2.6005, p-val = 0.0427
## 
## Model Results:
## 
##                                estimate      se     tval    pval    ci.lb 
## intrcpt                         -0.7922  0.6953  -1.1394  0.2582  -2.1774 
## populationGeneral               -0.5120  0.7117  -0.7193  0.4742  -1.9297 
## populationNeck Immobilization   -1.2845  0.7467  -1.7201  0.0895  -2.7720 
## populationObese                 -0.5425  0.7672  -0.7071  0.4817  -2.0709 
## populationPregnant Women         0.6419  0.9996   0.6422  0.5227  -1.3494 
##                                 ci.ub 
## intrcpt                        0.5929    
## populationGeneral              0.9058    
## populationNeck Immobilization  0.2031  . 
## populationObese                0.9859    
## populationPregnant Women       2.6333    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
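#The meta-regression is on the log risk ratio scale, so exponentiating
#the coefficients gives each population's RR relative to the reference
#level (sketch, storing the fit first)
reg_population<-meta::metareg(mbin_cormack_random,population)
exp(coef(reg_population))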
#Controlling for operator experience
meta::metareg(mbin_cormack_random,experience)
## 
## Mixed-Effects Model (k = 80; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7291 (SE = 0.1666)
## tau (square root of estimated tau^2 value):             0.8539
## I^2 (residual heterogeneity / unaccounted variability): 63.15%
## H^2 (unaccounted variability / sampling variability):   2.71
## R^2 (amount of heterogeneity accounted for):            2.79%
## 
## Test for Residual Heterogeneity:
## QE(df = 77) = 134.5042, p-val < .0001
## 
## Test of Moderators (coefficients 2:3):
## F(df1 = 2, df2 = 77) = 2.3391, p-val = 0.1032
## 
## Model Results:
## 
##                            estimate      se     tval    pval    ci.lb    ci.ub 
## intrcpt                     -1.1344  0.4030  -2.8148  0.0062  -1.9369  -0.3319 
## experienceExperienced       -0.4574  0.4289  -1.0663  0.2896  -1.3114   0.3967 
## experienceNon-experienced    0.1552  0.4813   0.3224  0.7480  -0.8031   1.1135 
##  
## intrcpt                    ** 
## experienceExperienced 
## experienceNon-experienced 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for intubation technique applied (regular, rapid sequence induction)
meta::metareg(mbin_cormack_random,technique)
## 
## Mixed-Effects Model (k = 80; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7666 (SE = 0.1714)
## tau (square root of estimated tau^2 value):             0.8756
## I^2 (residual heterogeneity / unaccounted variability): 65.93%
## H^2 (unaccounted variability / sampling variability):   2.94
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 76) = 155.4201, p-val < .0001
## 
## Test of Moderators (coefficients 2:4):
## F(df1 = 3, df2 = 76) = 0.4594, p-val = 0.7114
## 
## Model Results:
## 
##                                            estimate      se     tval    pval 
## intrcpt                                     -1.0735  0.3418  -3.1409  0.0024 
## techniqueRapid Sequence Induction           -0.5908  0.7025  -0.8411  0.4029 
## techniqueRegular                            -0.4028  0.3694  -1.0903  0.2790 
## techniqueRegular/Rapid Sequence Induction   -0.1770  0.9955  -0.1778  0.8593 
##                                              ci.lb    ci.ub 
## intrcpt                                    -1.7542  -0.3928  ** 
## techniqueRapid Sequence Induction          -1.9899   0.8083     
## techniqueRegular                           -1.1386   0.3330     
## techniqueRegular/Rapid Sequence Induction  -2.1596   1.8056     
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for setting (operating room, out of hospital, etc.)
meta::metareg(mbin_cormack_random,set)
## 
## Mixed-Effects Model (k = 80; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7357 (SE = 0.1664)
## tau (square root of estimated tau^2 value):             0.8578
## I^2 (residual heterogeneity / unaccounted variability): 65.13%
## H^2 (unaccounted variability / sampling variability):   2.87
## R^2 (amount of heterogeneity accounted for):            1.91%
## 
## Test for Residual Heterogeneity:
## QE(df = 78) = 156.3685, p-val < .0001
## 
## Test of Moderators (coefficient 2):
## F(df1 = 1, df2 = 78) = 2.5064, p-val = 0.1174
## 
## Model Results:
## 
##                    estimate      se     tval    pval    ci.lb    ci.ub 
## intrcpt             -0.8195  0.3982  -2.0581  0.0429  -1.6122  -0.0268  * 
## setOperating Room   -0.6629  0.4187  -1.5832  0.1174  -1.4964   0.1707    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for nature of procedure (elective, urgent)
meta::metareg(mbin_cormack_random,nature)
## 
## Mixed-Effects Model (k = 80; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7369 (SE = 0.1679)
## tau (square root of estimated tau^2 value):             0.8584
## I^2 (residual heterogeneity / unaccounted variability): 64.99%
## H^2 (unaccounted variability / sampling variability):   2.86
## R^2 (amount of heterogeneity accounted for):            1.76%
## 
## Test for Residual Heterogeneity:
## QE(df = 77) = 153.1418, p-val < .0001
## 
## Test of Moderators (coefficients 2:3):
## F(df1 = 2, df2 = 77) = 1.7339, p-val = 0.1834
## 
## Model Results:
## 
##                 estimate      se     tval    pval    ci.lb   ci.ub 
## intrcpt          -0.7016  0.8548  -0.8207  0.4143  -2.4037  1.0006    
## natureElective   -0.7906  0.8645  -0.9145  0.3633  -2.5121  0.9309    
## natureUrgent      0.0041  0.9715   0.0042  0.9967  -1.9304  1.9386    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for inducer used
meta::metareg(mbin_cormack_random,inducer)
## 
## Mixed-Effects Model (k = 80; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.6510 (SE = 0.1655)
## tau (square root of estimated tau^2 value):             0.8069
## I^2 (residual heterogeneity / unaccounted variability): 61.81%
## H^2 (unaccounted variability / sampling variability):   2.62
## R^2 (amount of heterogeneity accounted for):            13.20%
## 
## Test for Residual Heterogeneity:
## QE(df = 72) = 131.0596, p-val < .0001
## 
## Test of Moderators (coefficients 2:8):
## F(df1 = 7, df2 = 72) = 2.5996, p-val = 0.0189
## 
## Model Results:
## 
##                             estimate      se     tval    pval    ci.lb    ci.ub 
## intrcpt                      -0.9906  0.2798  -3.5407  0.0007  -1.5483  -0.4329 
## inducerEtomidate             -3.8887  1.3842  -2.8093  0.0064  -6.6480  -1.1293 
## inducerMidazolam             -1.2348  0.8647  -1.4280  0.1576  -2.9585   0.4889 
## inducerPropofol              -0.5760  0.3135  -1.8374  0.0703  -1.2010   0.0489 
## inducerPropofol/Ketamine     -0.5780  0.8361  -0.6914  0.4916  -2.2448   1.0887 
## inducerPropofol/Midazolam     0.8394  0.5581   1.5040  0.1370  -0.2732   1.9520 
## inducerPropofol/Thiopental   -0.1080  1.5262  -0.0708  0.9438  -3.1505   2.9344 
## inducerThiopental            -0.1295  0.5901  -0.2194  0.8270  -1.3058   1.0469 
##  
## intrcpt                     *** 
## inducerEtomidate             ** 
## inducerMidazolam 
## inducerPropofol               . 
## inducerPropofol/Ketamine 
## inducerPropofol/Midazolam 
## inducerPropofol/Thiopental 
## inducerThiopental 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for opioid used
meta::metareg(mbin_cormack_random,opioid)
## 
## Mixed-Effects Model (k = 80; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7149 (SE = 0.1665)
## tau (square root of estimated tau^2 value):             0.8455
## I^2 (residual heterogeneity / unaccounted variability): 64.08%
## H^2 (unaccounted variability / sampling variability):   2.78
## R^2 (amount of heterogeneity accounted for):            4.69%
## 
## Test for Residual Heterogeneity:
## QE(df = 76) = 152.4401, p-val < .0001
## 
## Test of Moderators (coefficients 2:4):
## F(df1 = 3, df2 = 76) = 2.1281, p-val = 0.1036
## 
## Model Results:
## 
##                     estimate      se     tval    pval    ci.lb    ci.ub 
## intrcpt              -1.1638  0.2652  -4.3890  <.0001  -1.6919  -0.6357  *** 
## opioidFentanyl       -0.3412  0.3058  -1.1156  0.2681  -0.9503   0.2679      
## opioidRemifentanil    0.1591  0.4405   0.3611  0.7190  -0.7182   1.0364      
## opioidSulfentanil    -1.5204  0.7048  -2.1573  0.0341  -2.9241  -0.1167    * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for neuromuscular blocking agent used
meta::metareg(mbin_cormack_random,blocker)
## Warning in rma.uni(yi = TE[!exclude], sei = seTE[!exclude], data = dataset, :
## Redundant predictors dropped from the model.
## 
## Mixed-Effects Model (k = 80; tau^2 estimator: SJ)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.7210 (SE = 0.1694)
## tau (square root of estimated tau^2 value):             0.8491
## I^2 (residual heterogeneity / unaccounted variability): 63.39%
## H^2 (unaccounted variability / sampling variability):   2.73
## R^2 (amount of heterogeneity accounted for):            3.88%
## 
## Test for Residual Heterogeneity:
## QE(df = 74) = 139.7692, p-val < .0001
## 
## Test of Moderators (coefficients 2:6):
## F(df1 = 5, df2 = 74) = 1.7597, p-val = 0.1317
## 
## Model Results:
## 
##                         estimate      se     tval    pval    ci.lb    ci.ub 
## intrcpt                  -0.9306  0.2512  -3.7054  0.0004  -1.4310  -0.4302 
## blockerAtracurium        -1.0006  0.4562  -2.1935  0.0314  -1.9096  -0.0917 
## blockerCisatracurium     -0.6820  0.5537  -1.2316  0.2220  -1.7853   0.4214 
## blockerRocuronium        -0.3473  0.3254  -1.0672  0.2894  -0.9957   0.3011 
## blockerSuccinylcholine   -0.9624  0.4164  -2.3111  0.0236  -1.7921  -0.1327 
## blockerVecuronium        -0.7403  0.4216  -1.7558  0.0833  -1.5804   0.0998 
##  
## intrcpt                 *** 
## blockerAtracurium         * 
## blockerCisatracurium 
## blockerRocuronium 
## blockerSuccinylcholine    * 
## blockerVecuronium         . 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
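#The same univariable screen can be run over all candidate moderators at
#once (sketch): collect the R^2 (heterogeneity explained) reported by
#each meta-regression
mods<-c("population","experience","technique","set","nature","inducer","opioid","blocker")
r2_by_mod<-sapply(mods,function(m) meta::metareg(mbin_cormack_random,as.formula(paste("~",m)))$R2)
sort(r2_by_mod,decreasing = TRUE)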
#Multiple Meta-regression for Cormack and Lehane
#Structure of the moderator coding (str() of the first-attempt table, which carries the same moderator columns used in the Cormack and Lehane model below)
str(first)
## 'data.frame':    110 obs. of  18 variables:
##  $ id          : int  1 2 3 4 6 7 8 11 14 15 ...
##  $ author      : Factor w/ 100 levels "Abdallah","Abdelgalel",..: 1 1 2 3 4 5 5 6 7 8 ...
##  $ year        : int  2011 2019 2018 2015 2020 2016 2015 2015 2016 2012 ...
##  $ population  : Factor w/ 5 levels "Elderly","General",..: 4 2 2 2 3 3 2 3 2 2 ...
##  $ predicted   : Factor w/ 3 levels "","Difficult",..: 2 3 2 3 2 2 3 2 3 2 ...
##  $ set         : Factor w/ 5 levels "Emergency department",..: 4 4 2 4 4 4 4 4 4 4 ...
##  $ nature      : Factor w/ 3 levels "","Elective",..: 2 2 3 2 2 2 2 2 2 1 ...
##  $ technique   : Factor w/ 4 levels "","Rapid Sequence Induction",..: 1 3 2 3 3 1 3 3 3 1 ...
##  $ experience  : Factor w/ 3 levels "","Experienced",..: 3 2 2 2 2 2 2 2 3 1 ...
##  $ intervention: Factor w/ 1 level "Videolaryngoscope": 1 1 1 1 1 1 1 1 1 1 ...
##  $ reference   : Factor w/ 1 level "Macintosh": 1 1 1 1 1 1 1 1 1 1 ...
##  $ inducer     : Factor w/ 9 levels "","Etomidate",..: 1 4 6 4 4 1 4 4 4 1 ...
##  $ opioid      : Factor w/ 6 levels "","Fentanyl",..: 1 2 2 2 2 1 1 2 5 1 ...
##  $ blocker     : Factor w/ 8 levels "","Atracurium",..: 1 3 4 4 8 1 4 4 4 1 ...
##  $ ffirst.e1   : int  7 1 5 0 0 0 0 1 25 3 ...
##  $ ffirst.t1   : int  50 35 80 40 40 78 25 45 64 25 ...
##  $ ffirst.e2   : int  4 2 11 1 1 0 0 6 6 9 ...
##  $ ffirst.t2   : int  49 35 40 40 40 96 25 45 22 25 ...
model_cormack<-metafor::rma.uni(ai=fcormack.e1,n1i = fcormack.t1,ci=fcormack.e2,n2i = fcormack.t2,data = cormack,method = "ML", measure = "RR", mods = ~experience+technique+population+set+nature+inducer+opioid+blocker,test = "knha")
model_cormack
## 
## Mixed-Effects Model (k = 92; tau^2 estimator: ML)
## 
## tau^2 (estimated amount of residual heterogeneity):     0 (SE = 0.0207)
## tau (square root of estimated tau^2 value):             0
## I^2 (residual heterogeneity / unaccounted variability): 0.00%
## H^2 (unaccounted variability / sampling variability):   1.00
## R^2 (amount of heterogeneity accounted for):            100.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 63) = 61.6788, p-val = 0.5235
## 
## Test of Moderators (coefficients 2:29):
## F(df1 = 28, df2 = 63) = 3.7382, p-val < .0001
## 
## Model Results:
## 
##                                            estimate      se     tval    pval 
## intrcpt                                     -3.4763  2.1363  -1.6273  0.1087 
## experienceExperienced                       -0.7153  0.3329  -2.1485  0.0355 
## experienceNon-experienced                   -0.5884  0.3741  -1.5729  0.1207 
## techniqueRapid Sequence Induction           -0.5589  0.9212  -0.6068  0.5462 
## techniqueRegular                             0.3461  0.4877   0.7097  0.4805 
## techniqueRegular/Rapid Sequence Induction   -0.2000  0.8156  -0.2452  0.8071 
## populationGeneral                           -0.4954  0.4930  -1.0049  0.3188 
## populationNeck Immobilization               -0.9118  0.4393  -2.0758  0.0420 
## populationObese                             -0.1944  0.5375  -0.3616  0.7188 
## populationPregnant Women                     3.0183  1.7226   1.7522  0.0846 
## setOperating Room                            0.5586  0.6544   0.8536  0.3966 
## natureElective                               2.7357  1.8039   1.5165  0.1344 
## natureUrgent                                 4.2199  2.0008   2.1091  0.0389 
## inducerEtomidate                            -4.3516  1.7487  -2.4885  0.0155 
## inducerMidazolam                            -2.2746  1.1095  -2.0502  0.0445 
## inducerPropofol                             -1.2087  0.7067  -1.7104  0.0921 
## inducerPropofol/Ketamine                    -1.6763  1.1611  -1.4437  0.1538 
## inducerPropofol/Midazolam                    0.2461  0.6616   0.3719  0.7112 
## inducerPropofol/Thiopental                  -0.6581  1.7623  -0.3734  0.7101 
## inducerThiopental                           -1.3318  0.8443  -1.5773  0.1197 
## opioidFentanyl                               0.7758  0.4305   1.8021  0.0763 
## opioidRemifentanil                           1.1637  0.4894   2.3778  0.0205 
## opioidSulfentanil                            0.0448  0.8685   0.0515  0.9591 
## blockerAtracurium                           -0.2561  0.5361  -0.4776  0.6346 
## blockerCisatracurium                        -0.0119  0.6081  -0.0196  0.9844 
## blockerRocuronium                            0.4742  0.4304   1.1018  0.2747 
## blockerRocuronium/Vecuronium                 1.3260  2.0278   0.6539  0.5155 
## blockerSuccinylcholine                      -0.2008  0.5110  -0.3929  0.6957 
## blockerVecuronium                           -0.1696  0.4271  -0.3972  0.6926 
##                                              ci.lb    ci.ub 
## intrcpt                                    -7.7453   0.7927    
## experienceExperienced                      -1.3806  -0.0500  * 
## experienceNon-experienced                  -1.3359   0.1591    
## techniqueRapid Sequence Induction          -2.3998   1.2820    
## techniqueRegular                           -0.6285   1.3208    
## techniqueRegular/Rapid Sequence Induction  -1.8298   1.4299    
## populationGeneral                          -1.4806   0.4898    
## populationNeck Immobilization              -1.7896  -0.0340  * 
## populationObese                            -1.2684   0.8797    
## populationPregnant Women                   -0.4240   6.4605  . 
## setOperating Room                          -0.7492   1.8664    
## natureElective                             -0.8691   6.3405    
## natureUrgent                                0.2216   8.2182  * 
## inducerEtomidate                           -7.8461  -0.8571  * 
## inducerMidazolam                           -4.4918  -0.0575  * 
## inducerPropofol                            -2.6210   0.2035  . 
## inducerPropofol/Ketamine                   -3.9967   0.6440    
## inducerPropofol/Midazolam                  -1.0760   1.5682    
## inducerPropofol/Thiopental                 -4.1799   2.8637    
## inducerThiopental                          -3.0190   0.3554    
## opioidFentanyl                             -0.0845   1.6361  . 
## opioidRemifentanil                          0.1857   2.1417  * 
## opioidSulfentanil                          -1.6907   1.7802    
## blockerAtracurium                          -1.3274   0.8153    
## blockerCisatracurium                       -1.2270   1.2032    
## blockerRocuronium                          -0.3858   1.3342    
## blockerRocuronium/Vecuronium               -2.7262   5.3783    
## blockerSuccinylcholine                     -1.2220   0.8204    
## blockerVecuronium                          -1.0231   0.6838    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
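#With 28 moderator coefficients for k = 92 comparisons, overfitting and
#collinearity among moderators are a concern; variance inflation factors
#flag redundant predictors (sketch, assuming metafor's vif() method for
#rma models)
metafor::vif(model_cormack)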
metafor::permutest(model_cormack)
## Running 1000 iterations for approximate permutation test.
## 
## 
## Test of Moderators (coefficients 2:29):
## F(df1 = 28, df2 = 63) = 3.7382, p-val* = 0.0100
## 
## Model Results:
## 
##                                            estimate      se     tval   pval* 
## intrcpt                                     -3.4763  2.1363  -1.6273  0.1700 
## experienceExperienced                       -0.7153  0.3329  -2.1485  0.0920 
## experienceNon-experienced                   -0.5884  0.3741  -1.5729  0.1960 
## techniqueRapid Sequence Induction           -0.5589  0.9212  -0.6068  0.5760 
## techniqueRegular                             0.3461  0.4877   0.7097  0.5600 
## techniqueRegular/Rapid Sequence Induction   -0.2000  0.8156  -0.2452  0.8170 
## populationGeneral                           -0.4954  0.4930  -1.0049  0.4110 
## populationNeck Immobilization               -0.9118  0.4393  -2.0758  0.0990 
## populationObese                             -0.1944  0.5375  -0.3616  0.7670 
## populationPregnant Women                     3.0183  1.7226   1.7522  0.0980 
## setOperating Room                            0.5586  0.6544   0.8536  0.4260 
## natureElective                               2.7357  1.8039   1.5165  0.1230 
## natureUrgent                                 4.2199  2.0008   2.1091  0.0320 
## inducerEtomidate                            -4.3516  1.7487  -2.4885  0.0210 
## inducerMidazolam                            -2.2746  1.1095  -2.0502  0.0800 
## inducerPropofol                             -1.2087  0.7067  -1.7104  0.1410 
## inducerPropofol/Ketamine                    -1.6763  1.1611  -1.4437  0.1950 
## inducerPropofol/Midazolam                    0.2461  0.6616   0.3719  0.7540 
## inducerPropofol/Thiopental                  -0.6581  1.7623  -0.3734  0.7300 
## inducerThiopental                           -1.3318  0.8443  -1.5773  0.1870 
## opioidFentanyl                               0.7758  0.4305   1.8021  0.1510 
## opioidRemifentanil                           1.1637  0.4894   2.3778  0.0730 
## opioidSulfentanil                            0.0448  0.8685   0.0515  0.9740 
## blockerAtracurium                           -0.2561  0.5361  -0.4776  0.7130 
## blockerCisatracurium                        -0.0119  0.6081  -0.0196  0.9880 
## blockerRocuronium                            0.4742  0.4304   1.1018  0.3580 
## blockerRocuronium/Vecuronium                 1.3260  2.0278   0.6539  0.5070 
## blockerSuccinylcholine                      -0.2008  0.5110  -0.3929  0.7620 
## blockerVecuronium                           -0.1696  0.4271  -0.3972  0.7480 
##                                              ci.lb    ci.ub 
## intrcpt                                    -7.7453   0.7927    
## experienceExperienced                      -1.3806  -0.0500  . 
## experienceNon-experienced                  -1.3359   0.1591    
## techniqueRapid Sequence Induction          -2.3998   1.2820    
## techniqueRegular                           -0.6285   1.3208    
## techniqueRegular/Rapid Sequence Induction  -1.8298   1.4299    
## populationGeneral                          -1.4806   0.4898    
## populationNeck Immobilization              -1.7896  -0.0340  . 
## populationObese                            -1.2684   0.8797    
## populationPregnant Women                   -0.4240   6.4605  . 
## setOperating Room                          -0.7492   1.8664    
## natureElective                             -0.8691   6.3405    
## natureUrgent                                0.2216   8.2182  * 
## inducerEtomidate                           -7.8461  -0.8571  * 
## inducerMidazolam                           -4.4918  -0.0575  . 
## inducerPropofol                            -2.6210   0.2035    
## inducerPropofol/Ketamine                   -3.9967   0.6440    
## inducerPropofol/Midazolam                  -1.0760   1.5682    
## inducerPropofol/Thiopental                 -4.1799   2.8637    
## inducerThiopental                          -3.0190   0.3554    
## opioidFentanyl                             -0.0845   1.6361    
## opioidRemifentanil                          0.1857   2.1417  . 
## opioidSulfentanil                          -1.6907   1.7802    
## blockerAtracurium                          -1.3274   0.8153    
## blockerCisatracurium                       -1.2270   1.2032    
## blockerRocuronium                          -0.3858   1.3342    
## blockerRocuronium/Vecuronium               -2.7262   5.3783    
## blockerSuccinylcholine                     -1.2220   0.8204    
## blockerVecuronium                          -1.0231   0.6838    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
model_cormack.2<-metafor::rma.uni(ai=fcormack.e1,n1i = fcormack.t1,ci=fcormack.e2,n2i = fcormack.t2,data = cormack,method = "ML", measure = "RR", mods = ~nature+inducer,test = "knha")
model_cormack.2
## 
## Mixed-Effects Model (k = 92; tau^2 estimator: ML)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.2299 (SE = 0.1107)
## tau (square root of estimated tau^2 value):             0.4795
## I^2 (residual heterogeneity / unaccounted variability): 32.15%
## H^2 (unaccounted variability / sampling variability):   1.47
## R^2 (amount of heterogeneity accounted for):            44.76%
## 
## Test for Residual Heterogeneity:
## QE(df = 82) = 127.4247, p-val = 0.0010
## 
## Test of Moderators (coefficients 2:10):
## F(df1 = 9, df2 = 82) = 2.4896, p-val = 0.0144
## 
## Model Results:
## 
##                             estimate      se     tval    pval    ci.lb    ci.ub 
## intrcpt                      -0.4924  0.7386  -0.6668  0.5068  -1.9617   0.9768 
## natureElective               -0.6700  0.6845  -0.9788  0.3305  -2.0316   0.6916 
## natureUrgent                  0.0722  0.8260   0.0875  0.9305  -1.5709   1.7154 
## inducerEtomidate             -3.7169  1.4614  -2.5434  0.0129  -6.6240  -0.8097 
## inducerMidazolam             -1.0732  0.9155  -1.1723  0.2445  -2.8944   0.7479 
## inducerPropofol              -0.2091  0.3060  -0.6834  0.4963  -0.8179   0.3996 
## inducerPropofol/Ketamine     -0.9069  0.7126  -1.2726  0.2067  -2.3245   0.5107 
## inducerPropofol/Midazolam     1.0736  0.4870   2.2045  0.0303   0.1048   2.0425 
## inducerPropofol/Thiopental    0.0638  1.6392   0.0389  0.9690  -3.1970   3.3247 
## inducerThiopental            -0.0614  0.5529  -0.1110  0.9119  -1.1613   1.0386 
##  
## intrcpt 
## natureElective 
## natureUrgent 
## inducerEtomidate            * 
## inducerMidazolam 
## inducerPropofol 
## inducerPropofol/Ketamine 
## inducerPropofol/Midazolam   * 
## inducerPropofol/Thiopental 
## inducerThiopental 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
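#Both models were fitted with ML, so the full and reduced specifications
#can be compared directly (sketch): likelihood-ratio test and fit
#statistics via metafor
anova(model_cormack,model_cormack.2)
fitstats(model_cormack)
fitstats(model_cormack.2)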
#Funnel Plot for Cormack and Lehane
meta::funnel(mbin_cormack_random,ref.triangle=TRUE,contour.levels=c(0.9,0.95,0.99),col.contour=c("darkblue","blue","lightblue"))
meta::metabias(mbin_cormack_random, method.bias = "linreg")
## 
##  Linear regression test of funnel plot asymmetry
## 
## data:  mbin_cormack_random
## t = -4.1969, df = 78, p-value = 7.108e-05
## alternative hypothesis: asymmetry in funnel plot
## sample estimates:
##       bias    se.bias  intercept 
## -0.9512368  0.2266518 -0.6152580
dmetar::eggers.test(mbin_cormack_random)
##              Intercept ConfidenceInterval      t     p
## Egger's test    -0.951      -1.343--0.559 -4.197 7e-05
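#Egger's regression can be anticonservative with binary outcomes; a
#regression test based on study size (Peters) is a common alternative
#for metabin objects (sketch)
meta::metabias(mbin_cormack_random, method.bias = "peters")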

trimfill_cormack<-meta::trimfill(mbin_cormack_random)
trimfill_cormack
##                               RR             95%-CI %W(random)
## Abdallah 2011             0.6236 [0.2634;   1.4765]        1.4
## Abdelgalel 2018           0.2083 [0.0788;   0.5506]        1.4
## Agrawal 2020              0.0164 [0.0010;   0.2591]        0.7
## Ahmad 2016                0.0483 [0.0121;   0.1920]        1.2
## Akbar 2015                0.6667 [0.1169;   3.8014]        1.1
## Al-Ghamdi 2016            0.4583 [0.1112;   1.8894]        1.2
## Ali 2017                  0.2500 [0.0578;   1.0814]        1.2
## Altun 2018                1.0000 [0.2637;   3.7921]        1.3
## Andersen 2011             0.1429 [0.0342;   0.5962]        1.2
## Aoi 2010                  0.0526 [0.0033;   0.8362]        0.7
## Aqil 2016                 0.2500 [0.0292;   2.1399]        0.9
## Aqil 2017                 0.2143 [0.0644;   0.7130]        1.3
## Arici 2014                5.0000 [0.2477; 100.9273]        0.7
## Aziz 2012                 0.3523 [0.1776;   0.6991]        1.5
## Barak 2007                0.0865 [0.0049;   1.5111]        0.7
## Bashir 2020               0.2000 [0.0099;   4.0371]        0.7
## Colak 2019                0.7000 [0.2924;   1.6759]        1.4
## El-Tahan 2018             1.3465 [0.4885;   3.7120]        1.4
## Huang 2020                0.0342 [0.0020;   0.5786]        0.7
## Bhat 2015                 0.2667 [0.0951;   0.7476]        1.4
## Bilehjani 2009            0.3169 [0.0133;   7.5444]        0.6
## Blajic 2019               0.4958 [0.1824;   1.3478]        1.4
## Cakir 2020                1.0000 [0.6550;   1.5266]        1.5
## Chandrashekaraiah 2017    0.5000 [0.0989;   2.5270]        1.1
## Colak 2015                0.1043 [0.0125;   0.8678]        0.9
## Dey 2020                  0.3086 [0.1602;   0.5947]        1.5
## Erden 2010                0.3333 [0.0387;   2.8745]        0.9
## Erturk 2015               0.2500 [0.0566;   1.1051]        1.2
## Gao 2018                  0.9491 [0.5035;   1.7891]        1.5
## Gupta 2020                0.0476 [0.0029;   0.7857]        0.7
## Hamp 2015                 0.1302 [0.0075;   2.2530]        0.7
## Hosalli 2017              0.1429 [0.0077;   2.6497]        0.7
## Hu 2017                   0.1111 [0.0061;   2.0369]        0.7
## Ilyas 2014                0.0169 [0.0010;   0.2744]        0.7
## Jungbauer 2009            0.2778 [0.1460;   0.5286]        1.5
## Kaur 2020                 0.0265 [0.0016;   0.4436]        0.7
## Khan 2008                 0.1429 [0.0078;   2.6093]        0.7
## Kido 2015                 0.0909 [0.0053;   1.5599]        0.7
## Kim 2013                  0.0337 [0.0021;   0.5303]        0.7
## Kleine-Brueggeney 2017    0.2520 [0.1949;   0.3259]        1.6
## Kunaz 2016                0.1111 [0.0061;   2.0105]        0.7
## Laosuwan 2015             0.1429 [0.0083;   2.4653]        0.7
## Lascarrou 2017            0.5486 [0.3213;   0.9367]        1.5
## Lim 2005                  0.0588 [0.0035;   0.9748]        0.7
## Lin 2012                  0.1976 [0.0236;   1.6548]        0.9
## Liu 2016                  0.0674 [0.0039;   1.1627]        0.7
## Liu 2019                  0.0076 [0.0005;   0.1219]        0.7
## Maharaj 2006              0.3333 [0.0141;   7.8648]        0.6
## Maharaj 2007              0.0667 [0.0041;   1.0925]        0.7
## Maharaj 2008              0.0286 [0.0018;   0.4441]        0.7
## Mahmood 2015              0.2000 [0.0100;   3.9955]        0.7
## Malik 2008                0.1333 [0.0273;   0.6518]        1.2
## Malik1 2009               0.0297 [0.0018;   0.4944]        0.7
## Malik2 2009               0.2000 [0.0100;   3.9955]        0.7
## Mcelwain 2011             0.2138 [0.0730;   0.6260]        1.4
## Kulkarni 2013             0.3333 [0.0141;   7.8648]        0.6
## Inangil 2018              0.1111 [0.0062;   1.9882]        0.7
## Jafra 2018                0.1111 [0.0061;   2.0369]        0.7
## Ndoko 2008                0.0435 [0.0026;   0.7193]        0.7
## Ninan 2016                0.3333 [0.0141;   7.8648]        0.6
## Nishiyama 2011            1.6509 [0.6054;   4.5022]        1.4
## Parasa 2016               0.2500 [0.0296;   2.1081]        0.9
## Pazur 2016                0.2000 [0.0101;   3.9701]        0.7
## Ranieri 2012              0.0628 [0.0037;   1.0770]        0.7
## Reena 2019                0.2222 [0.0505;   0.9774]        1.2
## Risse 2020                0.3043 [0.0129;   7.2021]        0.6
## Ruetzeler 2020            0.2864 [0.0826;   0.9927]        1.3
## Shah 2016                 0.3224 [0.0137;   7.6018]        0.6
## Sun 2005                  0.8333 [0.4453;   1.5594]        1.5
## Takenaka 2011             0.1944 [0.0097;   3.9039]        0.7
## Teoh 2010                 0.2000 [0.0487;   0.8219]        1.2
## Toker 2019                0.1538 [0.0366;   0.6469]        1.2
## Tolon 2012                0.2000 [0.0102;   3.9140]        0.7
## Varsha 2019               0.0435 [0.0027;   0.7100]        0.7
## Vijayakumar 2016          0.0400 [0.0024;   0.6556]        0.7
## Walker 2009               3.0000 [0.1247;  72.1913]        0.6
## Yoo 2018                  0.1000 [0.0140;   0.7162]        1.0
## Yumul 2016                0.1253 [0.0430;   0.3648]        1.4
## Cavus 2011                0.7857 [0.3244;   1.9033]        1.4
## Lee 2012                  0.0375 [0.0021;   0.6733]        0.7
## Filled: Yumul 2016        1.3538 [0.4648;   3.9434]        1.4
## Filled: Hu 2017           1.5261 [0.0833;  27.9764]        0.7
## Filled: Kunaz 2016        1.5261 [0.0843;  27.6139]        0.7
## Filled: Inangil 2018      1.5261 [0.0853;  27.3085]        0.7
## Filled: Jafra 2018        1.5261 [0.0833;  27.9764]        0.7
## Filled: Colak 2015        1.6265 [0.1954;  13.5379]        0.9
## Filled: Yoo 2018          1.6957 [0.2367;  12.1453]        1.0
## Filled: Kido 2015         1.8653 [0.1087;  32.0068]        0.7
## Filled: Barak 2007        1.9608 [0.1122;  34.2637]        0.7
## Filled: Liu 2016          2.5151 [0.1458;  43.3763]        0.7
## Filled: Maharaj 2007      2.5435 [0.1552;  41.6809]        0.7
## Filled: Ranieri 2012      2.7013 [0.1574;  46.3475]        0.7
## Filled: Lim 2005          2.8827 [0.1740;  47.7691]        0.7
## Filled: Aoi 2010          3.2218 [0.2028;  51.1874]        0.7
## Filled: Ahmad 2016        3.5133 [0.8831;  13.9770]        1.2
## Filled: Gupta 2020        3.5610 [0.2158;  58.7583]        0.7
## Filled: Ndoko 2008        3.9001 [0.2358;  64.5184]        0.7
## Filled: Varsha 2019       3.9001 [0.2388;  63.6847]        0.7
## Filled: Vijayakumar 2016  4.2392 [0.2586;  69.4811]        0.7
## Filled: Lee 2012          4.5185 [0.2519;  81.0636]        0.7
## Filled: Huang 2020        4.9620 [0.2930;  84.0193]        0.7
## Filled: Kim 2013          5.0330 [0.3198;  79.2172]        0.7
## Filled: Malik1 2009       5.7088 [0.3430;  95.0265]        0.7
## Filled: Maharaj 2008      5.9349 [0.3818;  92.2508]        0.7
## Filled: Kaur 2020         6.4039 [0.3822; 107.2901]        0.7
## Filled: Ilyas 2014       10.0046 [0.6179; 161.9751]        0.7
## Filled: Agrawal 2020     10.3437 [0.6545; 163.4756]        0.7
## Filled: Liu 2019         22.3042 [1.3913; 357.5672]        0.7
## 
## Number of studies combined: k = 108 (with 28 added studies)
## 
##                          RR           95%-CI     t  p-value
## Random effects model 0.3877 [0.2883; 0.5214] -6.34 < 0.0001
## Prediction interval         [0.0280; 5.3695]               
## 
## Quantifying heterogeneity:
##  tau^2 = 1.7350 [0.5516; 2.0050]; tau = 1.3172 [0.7427; 1.4160];
##  I^2 = 56.6% [46.2%; 65.0%]; H = 1.52 [1.36; 1.69]
## 
## Test of heterogeneity:
##       Q d.f.  p-value
##  246.41  107 < 0.0001
## 
## Details on meta-analytical method:
## - Inverse variance method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Trim-and-fill method to adjust for funnel plot asymmetry
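
To see where the imputed comparisons fall relative to the observed ones, the trim-and-fill object can be passed straight to the funnel plot function; a minimal sketch (output not shown), with the filled studies drawn as open circles by default.

#Funnel plot including the trim-and-fill imputed studies (sketch)
meta::funnel(trimfill_cormack)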

Time for Intubation

time<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Pairwise VLs Intubation Time.csv")

#Number of comparisons and patients meta-analyzed for time for intubation
length(time$mtime.1)
## [1] 126
sum(time$ftime.1,time$ftime.2)
## [1] 10613
#Meta-analysis for time for intubation
mcont_time<-meta::metacont(ftime.1,mtime.1,sdtime.1,ftime.2,mtime.2,sdtime.2,data=time,studlab = paste(author,year),comb.fixed = FALSE,comb.random = TRUE,prediction = TRUE,sm="SMD")
mcont_time
##                                   SMD             95%-CI %W(random)
## Abdallah 2019                 -0.6764 [-1.1589; -0.1939]        0.8
## Abdelgalel 2018                0.2258 [-0.3126;  0.7642]        0.8
## Abdelgalel 2018                0.2978 [-0.2418;  0.8374]        0.8
## Abdelgawad - Normotensos 2015 -2.4257 [-3.2609; -1.5905]        0.7
## Abdelgawad - Hipertensos 2015 -2.1291 [-2.9201; -1.3382]        0.8
## Aggarwal 2019                  5.0579 [ 4.2423;  5.8735]        0.7
## Ahmad 2016                     4.0959 [ 3.5679;  4.6238]        0.8
## Ahmad 2015                     0.5655 [-0.0008;  1.1317]        0.8
## Akbar 2015                    -0.7636 [-1.1923; -0.3350]        0.8
## Akbarzadeh 2017               -0.0143 [-0.4897;  0.4610]        0.8
## Al - Ghamdi 2016               0.8207 [-0.0656;  1.7070]        0.7
## Al - Ghamdi 2016               3.0833 [ 1.8624;  4.3042]        0.6
## Al - Ghamdi 2016               1.3619 [ 0.4723;  2.2516]        0.7
## Ali 2012                      -0.6936 [-1.2658; -0.1214]        0.8
## Ali 2017                      -0.5621 [-1.0788; -0.0455]        0.8
## Altun 2018                    -0.3879 [-0.9295;  0.1536]        0.8
## Altun 2018                    -1.4170 [-2.0144; -0.8195]        0.8
## Amini 2015                     0.8256 [ 0.3364;  1.3148]        0.8
## Ander 2017                    -0.1410 [-0.5854;  0.3034]        0.8
## Aoi 2010                       0.2745 [-0.3822;  0.9313]        0.8
## Aqil 2016                     -0.6713 [-1.1224; -0.2203]        0.8
## Aqil 2017                     -0.5322 [-0.8695; -0.1950]        0.9
## Arici 2014                     1.2926 [ 0.8086;  1.7767]        0.8
## Aziz 2012                      0.4704 [ 0.2245;  0.7163]        0.9
## Bakshi - Nti 2015              1.0686 [ 0.0930;  2.0441]        0.7
## Bakshi - Nti 2015              1.0686 [ 0.0930;  2.0441]        0.7
## Bakshi - Nvl 2015              7.9734 [ 5.1484; 10.7985]        0.3
## Bakshi - Nvl 2015              6.9883 [ 4.4741;  9.5026]        0.3
## Bakshi - Exp 2015              5.1551 [ 3.2018;  7.1083]        0.4
## Bakshi - Exp 2015              4.9935 [ 3.0881;  6.8989]        0.5
## Bakshi 2019                    0.3914 [-0.0720;  0.8549]        0.8
## Barak 2007                     0.7145 [ 0.4037;  1.0253]        0.9
## Barman 2017                   -0.1143 [-0.5832;  0.3546]        0.8
## Bashir 2020                   -0.4528 [-0.8969; -0.0086]        0.8
## Colak 2019                     1.0554 [ 0.6131;  1.4977]        0.8
## Kucukosman 2020                0.9848 [ 0.4469;  1.5227]        0.8
## Wasem 2013                     0.1881 [-0.3192;  0.6954]        0.8
## Yao 2015                       1.7045 [ 1.2347;  2.1742]        0.8
## Bhalla 2018                    1.4217 [ 0.6086;  2.2348]        0.7
## Bhandari 2013                 -2.7051 [-3.3190; -2.0912]        0.8
## Bharti 2014                    0.6737 [ 0.0263;  1.3210]        0.8
## Bhat 2015                     -1.0098 [-1.4270; -0.5926]        0.8
## Bilehjani 2009                 0.9775 [ 0.5064;  1.4486]        0.8
## Blajic 2019                    0.0000 [-0.4445;  0.4445]        0.8
## Blajic 2019                   -0.4018 [-0.8441;  0.0406]        0.8
## Cakir 2020                     0.6310 [ 0.1201;  1.1419]        0.8
## Caparlar 2019                 -1.5320 [-2.0398; -1.0242]        0.8
## Carlino 2009                   0.4265 [-0.2985;  1.1514]        0.8
## Chalkeidis 2010                0.7792 [ 0.2450;  1.3134]        0.8
## Choi 2011                     -0.0782 [-0.5844;  0.4281]        0.8
## Colak 2015                     0.7694 [ 0.2626;  1.2762]        0.8
## Colak 2015                     1.3797 [ 0.8396;  1.9198]        0.8
## Dashti 2014                    1.2810 [ 0.7173;  1.8447]        0.8
## Dhonneur 2008                 -2.7088 [-3.0832; -2.3343]        0.8
## Di Marco 2011                 -0.7686 [-1.1600; -0.3772]        0.8
## Erden 2010                     1.6399 [ 0.8249;  2.4550]        0.7
## Goksu 2016                    -0.2230 [-0.5440;  0.0981]        0.9
## Gunes 2020                     1.4455 [ 1.1166;  1.7744]        0.9
## Gupta 2020                    -0.1654 [-0.6045;  0.2736]        0.8
## Hamp 2015                      0.3828 [-0.2704;  1.0359]        0.8
## Hirabayashi 2009              -0.7998 [-0.9785; -0.6211]        0.9
## Hirabayashi 2009              -0.4875 [-0.7689; -0.2062]        0.9
## Hirabayashi 2010              -0.1963 [-0.4741;  0.0816]        0.9
## Hsu 2012                      -0.7473 [-1.2719; -0.2226]        0.8
## Hu 2017                       -0.3505 [-0.6328; -0.0683]        0.9
## Ilyas 2014                     0.5321 [ 0.1794;  0.8849]        0.9
## Jungbauer 2009                -0.3395 [-0.6187; -0.0602]        0.9
## Khan 2008                      0.2393 [-0.3539;  0.8326]        0.8
## Kido 2015                     -0.6884 [-1.2603; -0.1165]        0.8
## Kim 2013                      -0.8025 [-1.4122; -0.1928]        0.8
## Koh 2010                      -0.9366 [-1.5231; -0.3502]        0.8
## Kucukosman 2020                0.9848 [ 0.4469;  1.5227]        0.8
## Kumar_2 2019                  -0.2532 [-0.7615;  0.2550]        0.8
## Kunaz 2016                     0.9117 [ 0.4991;  1.3243]        0.8
## Laosuwan 2015                  1.3395 [ 0.3964;  2.2825]        0.7
## Lee 2013                       3.7949 [ 2.7219;  4.8678]        0.7
## Lim 2005                      -0.6018 [-1.1200; -0.0836]        0.8
## Liu 2014                      -1.5736 [-2.0782; -1.0690]        0.8
## Liu 2016                       0.1391 [-0.1559;  0.4341]        0.9
## Liu 2019                      -0.0558 [-0.2624;  0.1508]        0.9
## Maharaj 2006                  -0.0223 [-0.5284;  0.4838]        0.8
## Maharaj 2007                  -0.7376 [-1.3804; -0.0949]        0.8
## Maharaj 2008                  -4.4937 [-5.7019; -3.2855]        0.6
## Mahmood 2015                  -0.9226 [-1.4567; -0.3885]        0.8
## Malik 2008                     1.5112 [ 0.7150;  2.3073]        0.8
## Malik 2008                     1.1925 [ 0.4257;  1.9593]        0.8
## Malik 2008                     0.6892 [-0.0440;  1.4223]        0.8
## Maruyama 2008                  0.9466 [ 0.0945;  1.7987]        0.7
## Myunghun-Kim 2017              0.9853 [ 0.3251;  1.6455]        0.8
## Inangil 2018                  -0.2170 [-0.6870;  0.2530]        0.8
## Jafra 2018                     0.8912 [ 0.6003;  1.1820]        0.9
## Ndoko 2008                    -1.6035 [-2.0433; -1.1638]        0.8
## Nishiyama 2011                 0.4487 [-0.2332;  1.1305]        0.8
## Nishiyama 2011                 1.4753 [ 0.7455;  2.2050]        0.8
## Nishiyama 2011                 2.8038 [ 1.9266;  3.6811]        0.7
## Pappu 2020                     1.0982 [ 0.4344;  1.7619]        0.8
## Pappu 2020                     0.0881 [-0.5320;  0.7082]        0.8
## Parasa 2016                    1.9667 [ 1.3434;  2.5901]        0.8
## Pazur 2016                    -0.0433 [-0.5870;  0.5004]        0.8
## Peirovifar 2014               -1.6773 [-2.4080; -0.9465]        0.8
## Pournajafian 2014              1.4975 [ 1.0401;  1.9549]        0.8
## Ranieri 2012                  -1.4154 [-1.7983; -1.0326]        0.8
## Reena 2019                    -0.9104 [-1.3230; -0.4978]        0.8
## Shah 2016                     -0.3297 [-0.8438;  0.1844]        0.8
## Shukla 2017                   -0.5891 [-1.0373; -0.1410]        0.8
## Sulser 2016                    0.0989 [-0.2246;  0.4224]        0.9
## Sun 2005                       1.1310 [ 0.8321;  1.4300]        0.9
## Taylor 2013                    0.8800 [ 0.4415;  1.3185]        0.8
## Teoh 2010                     -0.1486 [-0.5425;  0.2453]        0.8
## Teoh 2010                      0.5652 [ 0.1657;  0.9647]        0.8
## Teoh 2010                      0.5968 [ 0.2010;  0.9926]        0.8
## Toker 2019                    -1.0109 [-1.4281; -0.5936]        0.8
## Tolon 2012                    -0.8071 [-1.4543; -0.1599]        0.8
## Tsan 2020                      0.6173 [ 0.2755;  0.9590]        0.9
## Yumul 2016                    -0.6805 [-1.4132;  0.0522]        0.8
## Yumul 2016                    -0.0270 [-0.7427;  0.6887]        0.8
## Yumul 2016                    -0.2275 [-0.9203;  0.4652]        0.8
## El-Tahan 2017                  1.7259 [ 0.8548;  2.5970]        0.7
## Enomoto 2008                   0.1524 [-0.1232;  0.4281]        0.9
## Maruyama 2008                  0.6037 [-0.2549;  1.4624]        0.7
## Turkstra 2005                  0.9339 [-0.0524;  1.9202]        0.7
## Zhao 2014                     -1.2951 [-1.6491; -0.9411]        0.9
## Serocki 2013                   0.7451 [ 0.1251;  1.3650]        0.8
## Serocki 2013                   0.6208 [ 0.0034;  1.2382]        0.8
## Arora 2013                     0.3881 [ 0.0072;  0.7690]        0.8
## Avula 2019                     1.9217 [ 1.3031;  2.5402]        0.8
## 
## Number of studies combined: k = 126
## 
##                         SMD            95%-CI    z p-value
## Random effects model 0.2854 [ 0.1019; 0.4689] 3.05  0.0023
## Prediction interval         [-1.7007; 2.2716]             
## 
## Quantifying heterogeneity:
##  tau^2 = 0.9982 [1.1230; 1.9640]; tau = 0.9991 [1.0597; 1.4014];
##  I^2 = 94.7% [94.1%; 95.2%]; H = 4.34 [4.11; 4.59]
## 
## Test of heterogeneity:
##        Q d.f. p-value
##  2356.90  125       0
## 
## Details on meta-analytical method:
## - Inverse variance method
## - DerSimonian-Laird estimator for tau^2
## - Jackson method for confidence interval of tau^2 and tau
## - Hedges' g (bias corrected standardised mean difference)
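
The prediction interval printed above can be reproduced by hand from the pooled estimate, its standard error, and tau^2, using a t distribution with k - 2 degrees of freedom. A minimal sketch using the slots of the meta object (mu, se, and k are names introduced here for illustration):

#Hand-computing the prediction interval reported above (sketch)
mu <- mcont_time$TE.random
se <- mcont_time$seTE.random
k <- mcont_time$k
mu + c(-1, 1) * qt(0.975, df = k - 2) * sqrt(mcont_time$tau^2 + se^2)
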
#Forest plot for time for intubation
meta::forest(mcont_time,sortvar=TE,lab.e="Videolaryngoscopes",lab.c="Macintosh",col.study="black",col.square="black",col.diamond="blue")


#Detecting Outliers for time for intubation
dmetar::find.outliers(mcont_time)
## Identified outliers (random-effects model) 
## ------------------------------------------ 
## "Abdallah 2019", "Abdelgawad - Normotensos 2015", "Abdelgawad - Hipertensos 2015", "Aggarwal 2019", "Ahmad 2016", "Akbar 2015", "Al - Ghamdi 2016", "Al - Ghamdi 2016", "Ali 2012", "Ali 2017", "Altun 2018", "Aqil 2016", "Aqil 2017", "Arici 2014", "Bakshi - Nvl 2015", "Bakshi - Nvl 2015", "Bakshi - Exp 2015", "Bakshi - Exp 2015", "Bashir 2020", "Colak 2019", "Yao 2015", "Bhalla 2018", "Bhandari 2013", "Bhat 2015", "Bilehjani 2009", "Blajic 2019", "Caparlar 2019", "Colak 2015", "Dashti 2014", "Dhonneur 2008", "Di Marco 2011", "Erden 2010", "Goksu 2016", "Gunes 2020", "Hirabayashi 2009", "Hirabayashi 2009", "Hirabayashi 2010", "Hsu 2012", "Hu 2017", "Jungbauer 2009", "Kido 2015", "Kim 2013", "Koh 2010", "Kunaz 2016", "Lee 2013", "Lim 2005", "Liu 2014", "Maharaj 2007", "Maharaj 2008", "Mahmood 2015", "Malik 2008", "Jafra 2018", "Ndoko 2008", "Nishiyama 2011", "Nishiyama 2011", "Parasa 2016", "Peirovifar 2014", "Pournajafian 2014", "Ranieri 2012", "Reena 2019", "Shukla 2017", "Sun 2005", "Toker 2019", "Tolon 2012", "Yumul 2016", "El-Tahan 2017", "Zhao 2014", "Avula 2019" 
##  
## Results with outliers removed 
## ----------------------------- 
##                                   SMD             95%-CI %W(random) exclude
## Abdallah 2019                 -0.6764 [-1.1589; -0.1939]        0.0       *
## Abdelgalel 2018                0.2258 [-0.3126;  0.7642]        1.7        
## Abdelgalel 2018                0.2978 [-0.2418;  0.8374]        1.7        
## Abdelgawad - Normotensos 2015 -2.4257 [-3.2609; -1.5905]        0.0       *
## Abdelgawad - Hipertensos 2015 -2.1291 [-2.9201; -1.3382]        0.0       *
## Aggarwal 2019                  5.0579 [ 4.2423;  5.8735]        0.0       *
## Ahmad 2016                     4.0959 [ 3.5679;  4.6238]        0.0       *
## Ahmad 2015                     0.5655 [-0.0008;  1.1317]        1.6        
## Akbar 2015                    -0.7636 [-1.1923; -0.3350]        0.0       *
## Akbarzadeh 2017               -0.0143 [-0.4897;  0.4610]        1.9        
## Al - Ghamdi 2016               0.8207 [-0.0656;  1.7070]        1.0        
## Al - Ghamdi 2016               3.0833 [ 1.8624;  4.3042]        0.0       *
## Al - Ghamdi 2016               1.3619 [ 0.4723;  2.2516]        0.0       *
## Ali 2012                      -0.6936 [-1.2658; -0.1214]        0.0       *
## Ali 2017                      -0.5621 [-1.0788; -0.0455]        0.0       *
## Altun 2018                    -0.3879 [-0.9295;  0.1536]        1.7        
## Altun 2018                    -1.4170 [-2.0144; -0.8195]        0.0       *
## Amini 2015                     0.8256 [ 0.3364;  1.3148]        1.9        
## Ander 2017                    -0.1410 [-0.5854;  0.3034]        2.0        
## Aoi 2010                       0.2745 [-0.3822;  0.9313]        1.4        
## Aqil 2016                     -0.6713 [-1.1224; -0.2203]        0.0       *
## Aqil 2017                     -0.5322 [-0.8695; -0.1950]        0.0       *
## Arici 2014                     1.2926 [ 0.8086;  1.7767]        0.0       *
## Aziz 2012                      0.4704 [ 0.2245;  0.7163]        2.7        
## Bakshi - Nti 2015              1.0686 [ 0.0930;  2.0441]        0.9        
## Bakshi - Nti 2015              1.0686 [ 0.0930;  2.0441]        0.9        
## Bakshi - Nvl 2015              7.9734 [ 5.1484; 10.7985]        0.0       *
## Bakshi - Nvl 2015              6.9883 [ 4.4741;  9.5026]        0.0       *
## Bakshi - Exp 2015              5.1551 [ 3.2018;  7.1083]        0.0       *
## Bakshi - Exp 2015              4.9935 [ 3.0881;  6.8989]        0.0       *
## Bakshi 2019                    0.3914 [-0.0720;  0.8549]        1.9        
## Barak 2007                     0.7145 [ 0.4037;  1.0253]        2.4        
## Barman 2017                   -0.1143 [-0.5832;  0.3546]        1.9        
## Bashir 2020                   -0.4528 [-0.8969; -0.0086]        0.0       *
## Colak 2019                     1.0554 [ 0.6131;  1.4977]        0.0       *
## Kucukosman 2020                0.9848 [ 0.4469;  1.5227]        1.7        
## Wasem 2013                     0.1881 [-0.3192;  0.6954]        1.8        
## Yao 2015                       1.7045 [ 1.2347;  2.1742]        0.0       *
## Bhalla 2018                    1.4217 [ 0.6086;  2.2348]        0.0       *
## Bhandari 2013                 -2.7051 [-3.3190; -2.0912]        0.0       *
## Bharti 2014                    0.6737 [ 0.0263;  1.3210]        1.4        
## Bhat 2015                     -1.0098 [-1.4270; -0.5926]        0.0       *
## Bilehjani 2009                 0.9775 [ 0.5064;  1.4486]        0.0       *
## Blajic 2019                    0.0000 [-0.4445;  0.4445]        2.0        
## Blajic 2019                   -0.4018 [-0.8441;  0.0406]        0.0       *
## Cakir 2020                     0.6310 [ 0.1201;  1.1419]        1.8        
## Caparlar 2019                 -1.5320 [-2.0398; -1.0242]        0.0       *
## Carlino 2009                   0.4265 [-0.2985;  1.1514]        1.3        
## Chalkeidis 2010                0.7792 [ 0.2450;  1.3134]        1.7        
## Choi 2011                     -0.0782 [-0.5844;  0.4281]        1.8        
## Colak 2015                     0.7694 [ 0.2626;  1.2762]        1.8        
## Colak 2015                     1.3797 [ 0.8396;  1.9198]        0.0       *
## Dashti 2014                    1.2810 [ 0.7173;  1.8447]        0.0       *
## Dhonneur 2008                 -2.7088 [-3.0832; -2.3343]        0.0       *
## Di Marco 2011                 -0.7686 [-1.1600; -0.3772]        0.0       *
## Erden 2010                     1.6399 [ 0.8249;  2.4550]        0.0       *
## Goksu 2016                    -0.2230 [-0.5440;  0.0981]        0.0       *
## Gunes 2020                     1.4455 [ 1.1166;  1.7744]        0.0       *
## Gupta 2020                    -0.1654 [-0.6045;  0.2736]        2.0        
## Hamp 2015                      0.3828 [-0.2704;  1.0359]        1.4        
## Hirabayashi 2009              -0.7998 [-0.9785; -0.6211]        0.0       *
## Hirabayashi 2009              -0.4875 [-0.7689; -0.2062]        0.0       *
## Hirabayashi 2010              -0.1963 [-0.4741;  0.0816]        0.0       *
## Hsu 2012                      -0.7473 [-1.2719; -0.2226]        0.0       *
## Hu 2017                       -0.3505 [-0.6328; -0.0683]        0.0       *
## Ilyas 2014                     0.5321 [ 0.1794;  0.8849]        2.3        
## Jungbauer 2009                -0.3395 [-0.6187; -0.0602]        0.0       *
## Khan 2008                      0.2393 [-0.3539;  0.8326]        1.6        
## Kido 2015                     -0.6884 [-1.2603; -0.1165]        0.0       *
## Kim 2013                      -0.8025 [-1.4122; -0.1928]        0.0       *
## Koh 2010                      -0.9366 [-1.5231; -0.3502]        0.0       *
## Kucukosman 2020                0.9848 [ 0.4469;  1.5227]        1.7        
## Kumar_2 2019                  -0.2532 [-0.7615;  0.2550]        1.8        
## Kunaz 2016                     0.9117 [ 0.4991;  1.3243]        0.0       *
## Laosuwan 2015                  1.3395 [ 0.3964;  2.2825]        0.9        
## Lee 2013                       3.7949 [ 2.7219;  4.8678]        0.0       *
## Lim 2005                      -0.6018 [-1.1200; -0.0836]        0.0       *
## Liu 2014                      -1.5736 [-2.0782; -1.0690]        0.0       *
## Liu 2016                       0.1391 [-0.1559;  0.4341]        2.5        
## Liu 2019                      -0.0558 [-0.2624;  0.1508]        2.8        
## Maharaj 2006                  -0.0223 [-0.5284;  0.4838]        1.8        
## Maharaj 2007                  -0.7376 [-1.3804; -0.0949]        0.0       *
## Maharaj 2008                  -4.4937 [-5.7019; -3.2855]        0.0       *
## Mahmood 2015                  -0.9226 [-1.4567; -0.3885]        0.0       *
## Malik 2008                     1.5112 [ 0.7150;  2.3073]        0.0       *
## Malik 2008                     1.1925 [ 0.4257;  1.9593]        1.2        
## Malik 2008                     0.6892 [-0.0440;  1.4223]        1.2        
## Maruyama 2008                  0.9466 [ 0.0945;  1.7987]        1.0        
## Myunghun-Kim 2017              0.9853 [ 0.3251;  1.6455]        1.4        
## Inangil 2018                  -0.2170 [-0.6870;  0.2530]        1.9        
## Jafra 2018                     0.8912 [ 0.6003;  1.1820]        0.0       *
## Ndoko 2008                    -1.6035 [-2.0433; -1.1638]        0.0       *
## Nishiyama 2011                 0.4487 [-0.2332;  1.1305]        1.4        
## Nishiyama 2011                 1.4753 [ 0.7455;  2.2050]        0.0       *
## Nishiyama 2011                 2.8038 [ 1.9266;  3.6811]        0.0       *
## Pappu 2020                     1.0982 [ 0.4344;  1.7619]        1.4        
## Pappu 2020                     0.0881 [-0.5320;  0.7082]        1.5        
## Parasa 2016                    1.9667 [ 1.3434;  2.5901]        0.0       *
## Pazur 2016                    -0.0433 [-0.5870;  0.5004]        1.7        
## Peirovifar 2014               -1.6773 [-2.4080; -0.9465]        0.0       *
## Pournajafian 2014              1.4975 [ 1.0401;  1.9549]        0.0       *
## Ranieri 2012                  -1.4154 [-1.7983; -1.0326]        0.0       *
## Reena 2019                    -0.9104 [-1.3230; -0.4978]        0.0       *
## Shah 2016                     -0.3297 [-0.8438;  0.1844]        1.8        
## Shukla 2017                   -0.5891 [-1.0373; -0.1410]        0.0       *
## Sulser 2016                    0.0989 [-0.2246;  0.4224]        2.4        
## Sun 2005                       1.1310 [ 0.8321;  1.4300]        0.0       *
## Taylor 2013                    0.8800 [ 0.4415;  1.3185]        2.0        
## Teoh 2010                     -0.1486 [-0.5425;  0.2453]        2.2        
## Teoh 2010                      0.5652 [ 0.1657;  0.9647]        2.2        
## Teoh 2010                      0.5968 [ 0.2010;  0.9926]        2.2        
## Toker 2019                    -1.0109 [-1.4281; -0.5936]        0.0       *
## Tolon 2012                    -0.8071 [-1.4543; -0.1599]        0.0       *
## Tsan 2020                      0.6173 [ 0.2755;  0.9590]        2.3        
## Yumul 2016                    -0.6805 [-1.4132;  0.0522]        0.0       *
## Yumul 2016                    -0.0270 [-0.7427;  0.6887]        1.3        
## Yumul 2016                    -0.2275 [-0.9203;  0.4652]        1.3        
## El-Tahan 2017                  1.7259 [ 0.8548;  2.5970]        0.0       *
## Enomoto 2008                   0.1524 [-0.1232;  0.4281]        2.6        
## Maruyama 2008                  0.6037 [-0.2549;  1.4624]        1.0        
## Turkstra 2005                  0.9339 [-0.0524;  1.9202]        0.8        
## Zhao 2014                     -1.2951 [-1.6491; -0.9411]        0.0       *
## Serocki 2013                   0.7451 [ 0.1251;  1.3650]        1.5        
## Serocki 2013                   0.6208 [ 0.0034;  1.2382]        1.5        
## Arora 2013                     0.3881 [ 0.0072;  0.7690]        2.2        
## Avula 2019                     1.9217 [ 1.3031;  2.5402]        0.0       *
## 
## Number of studies combined: k = 58
## 
##                         SMD            95%-CI    z  p-value
## Random effects model 0.3650 [ 0.2590; 0.4710] 6.75 < 0.0001
## Prediction interval         [-0.2595; 0.9895]              
## 
## Quantifying heterogeneity:
##  tau^2 = 0.0943 [0.0526; 0.1922]; tau = 0.3070 [0.2293; 0.4384];
##  I^2 = 62.1% [49.8%; 71.5%]; H = 1.63 [1.41; 1.87]
## 
## Test of heterogeneity:
##       Q d.f.  p-value
##  150.59   57 < 0.0001
## 
## Details on meta-analytical method:
## - Inverse variance method
## - DerSimonian-Laird estimator for tau^2
## - Jackson method for confidence interval of tau^2 and tau
## - Hedges' g (bias corrected standardised mean difference)
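
Beyond the outlier screen above, a leave-one-out analysis shows how much any single comparison drives the pooled SMD; a minimal sketch (output not shown; metainf_time is a name introduced here for illustration).

#Leave-one-out sensitivity analysis for time for intubation (sketch)
metainf_time <- meta::metainf(mcont_time, pooled = "random")
meta::forest(metainf_time)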
#Meta-regression for time for intubation

#Controlling for population characteristics (general, elderly, obese, neck immobilization, pregnant women)
meta::metareg(mcont_time,population)
## 
## Mixed-Effects Model (k = 126; tau^2 estimator: DL)
## 
## tau^2 (estimated amount of residual heterogeneity):     0.9310 (SE = 0.1679)
## tau (square root of estimated tau^2 value):             0.9649
## I^2 (residual heterogeneity / unaccounted variability): 94.29%
## H^2 (unaccounted variability / sampling variability):   17.50
## R^2 (amount of heterogeneity accounted for):            6.73%
## 
## Test for Residual Heterogeneity:
## QE(df = 120) = 2100.2723, p-val < .0001
## 
## Test of Moderators (coefficients 2:6):
## QM(df = 5) = 21.2501, p-val = 0.0007
## 
## Model Results:
## 
##                                estimate      se     zval    pval    ci.lb 
## intrcpt                          1.0019  0.2249   4.4543  <.0001   0.5611 
## populationElderly               -0.0186  0.7348  -0.0253  0.9798  -1.4587 
## populationGeneral               -0.8684  0.2546  -3.4108  0.0006  -1.3675 
## populationNeck Immobilization   -0.6201  0.3282  -1.8895  0.0588  -1.2633 
## populationObese                 -1.6482  0.4025  -4.0954  <.0001  -2.4370 
## populationPregnant Women        -0.2982  0.6167  -0.4835  0.6287  -1.5069 
##                                  ci.ub 
## intrcpt                         1.4428  *** 
## populationElderly               1.4215      
## populationGeneral              -0.3694  *** 
## populationNeck Immobilization   0.0231    . 
## populationObese                -0.8594  *** 
## populationPregnant Women        0.9105      
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
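
The same moderator can also be examined as a subgroup analysis, which reports a pooled SMD per population stratum rather than regression coefficients. A minimal sketch using update() on the existing meta object (byvar is the meta 4.x argument name; mcont_time_pop is a name introduced here for illustration):

#Subgroup analysis by population (sketch)
mcont_time_pop <- update(mcont_time, byvar = population)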
#Controlling for operator experience
meta::metareg(mcont_time,experience)
## 
## Mixed-Effects Model (k = 126; tau^2 estimator: DL)
## 
## tau^2 (estimated amount of residual heterogeneity):     1.0270 (SE = 0.1801)
## tau (square root of estimated tau^2 value):             1.0134
## I^2 (residual heterogeneity / unaccounted variability): 94.78%
## H^2 (unaccounted variability / sampling variability):   19.15
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 123) = 2355.4501, p-val < .0001
## 
## Test of Moderators (coefficients 2:3):
## QM(df = 2) = 2.0211, p-val = 0.3640
## 
## Model Results:
## 
##                            estimate      se    zval    pval    ci.lb   ci.ub 
## intrcpt                      0.0751  0.2248  0.3341  0.7383  -0.3656  0.5158    
## experienceExperienced        0.1925  0.2570  0.7491  0.4538  -0.3112  0.6962    
## experienceNon-experienced    0.4152  0.2964  1.4010  0.1612  -0.1657  0.9961    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for intubation technique applied (regular, rapid sequence induction)
meta::metareg(mcont_time,technique)
## 
## Mixed-Effects Model (k = 126; tau^2 estimator: DL)
## 
## tau^2 (estimated amount of residual heterogeneity):     1.0092 (SE = 0.1758)
## tau (square root of estimated tau^2 value):             1.0046
## I^2 (residual heterogeneity / unaccounted variability): 94.66%
## H^2 (unaccounted variability / sampling variability):   18.74
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 123) = 2304.6703, p-val < .0001
## 
## Test of Moderators (coefficients 2:3):
## QM(df = 2) = 10.8042, p-val = 0.0045
## 
## Model Results:
## 
##                                    estimate      se     zval    pval    ci.lb 
## intrcpt                              0.7895  0.1999   3.9497  <.0001   0.3977 
## techniqueRapid Sequence Induction   -1.3739  0.5004  -2.7454  0.0060  -2.3548 
## techniqueRegular                    -0.6052  0.2280  -2.6544  0.0079  -1.0520 
##                                      ci.ub 
## intrcpt                             1.1813  *** 
## techniqueRapid Sequence Induction  -0.3931   ** 
## techniqueRegular                   -0.1583   ** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for setting (emergency department, ICU, operating room)
meta::metareg(mcont_time,set)
## 
## Mixed-Effects Model (k = 126; tau^2 estimator: DL)
## 
## tau^2 (estimated amount of residual heterogeneity):     1.0211 (SE = 0.1817)
## tau (square root of estimated tau^2 value):             1.0105
## I^2 (residual heterogeneity / unaccounted variability): 94.78%
## H^2 (unaccounted variability / sampling variability):   19.14
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 123) = 2354.7840, p-val < .0001
## 
## Test of Moderators (coefficients 2:3):
## QM(df = 2) = 0.2380, p-val = 0.8878
## 
## Model Results:
## 
##                    estimate      se     zval    pval    ci.lb   ci.ub 
## intrcpt             -0.0620  0.7239  -0.0857  0.9317  -1.4809  1.3568    
## setICU               0.3239  1.0356   0.3127  0.7545  -1.7058  2.3536    
## setOperating Room    0.3554  0.7303   0.4867  0.6265  -1.0759  1.7867    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for nature of procedure (elective, urgent)
meta::metareg(mcont_time,nature)
## 
## Mixed-Effects Model (k = 126; tau^2 estimator: DL)
## 
## tau^2 (estimated amount of residual heterogeneity):     1.0292 (SE = 0.1788)
## tau (square root of estimated tau^2 value):             1.0145
## I^2 (residual heterogeneity / unaccounted variability): 94.75%
## H^2 (unaccounted variability / sampling variability):   19.05
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 123) = 2343.3999, p-val < .0001
## 
## Test of Moderators (coefficients 2:3):
## QM(df = 2) = 6.8274, p-val = 0.0329
## 
## Model Results:
## 
##                 estimate      se     zval    pval    ci.lb    ci.ub 
## intrcpt           0.8001  0.2183   3.6652  0.0002   0.3723   1.2280  *** 
## natureElective   -0.6295  0.2434  -2.5860  0.0097  -1.1065  -0.1524   ** 
## natureUrgent     -0.7039  0.5636  -1.2488  0.2117  -1.8085   0.4008      
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for inducer used
meta::metareg(mcont_time,inducer)
## 
## Mixed-Effects Model (k = 126; tau^2 estimator: DL)
## 
## tau^2 (estimated amount of residual heterogeneity):     1.0525 (SE = 0.1840)
## tau (square root of estimated tau^2 value):             1.0259
## I^2 (residual heterogeneity / unaccounted variability): 94.64%
## H^2 (unaccounted variability / sampling variability):   18.65
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 118) = 2201.2501, p-val < .0001
## 
## Test of Moderators (coefficients 2:8):
## QM(df = 7) = 8.6177, p-val = 0.2813
## 
## Model Results:
## 
##                             estimate      se     zval    pval    ci.lb   ci.ub 
## intrcpt                       0.0446  0.3305   0.1350  0.8926  -0.6032  0.6924 
## inducerEtomidate             -0.1004  1.0830  -0.0927  0.9261  -2.2231  2.0222 
## inducerMidazolam              0.3356  0.8151   0.4117  0.6806  -1.2619  1.9331 
## inducerPropofol               0.1766  0.3485   0.5068  0.6123  -0.5064  0.8596 
## inducerPropofol/Ketamine      0.4615  0.6364   0.7253  0.4683  -0.7857  1.7088 
## inducerPropofol/Midazolam     1.4987  0.5853   2.5606  0.0104   0.3516  2.6458 
## inducerPropofol/Thiopental    0.0543  1.0904   0.0498  0.9603  -2.0829  2.1915 
## inducerThipental              0.5645  0.5212   1.0832  0.2787  -0.4570  1.5861 
##  
## intrcpt 
## inducerEtomidate 
## inducerMidazolam 
## inducerPropofol 
## inducerPropofol/Ketamine 
## inducerPropofol/Midazolam   * 
## inducerPropofol/Thiopental 
## inducerThipental 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for opioid used
meta::metareg(mcont_time,opioid)
## 
## Mixed-Effects Model (k = 126; tau^2 estimator: DL)
## 
## tau^2 (estimated amount of residual heterogeneity):     1.0184 (SE = 0.1795)
## tau (square root of estimated tau^2 value):             1.0092
## I^2 (residual heterogeneity / unaccounted variability): 94.62%
## H^2 (unaccounted variability / sampling variability):   18.60
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 120) = 2231.7153, p-val < .0001
## 
## Test of Moderators (coefficients 2:6):
## QM(df = 5) = 7.9138, p-val = 0.1611
## 
## Model Results:
## 
##                             estimate      se     zval    pval    ci.lb    ci.ub 
## intrcpt                       0.2127  0.1938   1.0971  0.2726  -0.1672   0.5926 
## opioidFentanyl                0.1330  0.2294   0.5799  0.5620  -0.3167   0.5828 
## opioidFentanyl/Sulfentanil    0.6673  1.0517   0.6345  0.5257  -1.3939   2.7286 
## opioidMorphine                0.1755  1.0458   0.1678  0.8668  -1.8743   2.2252 
## opioidRemifentanil            0.4187  0.3633   1.1523  0.2492  -0.2935   1.1308 
## opioidSulfentanil            -0.8760  0.4353  -2.0122  0.0442  -1.7292  -0.0227 
##  
## intrcpt 
## opioidFentanyl 
## opioidFentanyl/Sulfentanil 
## opioidMorphine 
## opioidRemifentanil 
## opioidSulfentanil           * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Controlling for neuromuscular blocking agent used
meta::metareg(mcont_time,blocker)
## 
## Mixed-Effects Model (k = 126; tau^2 estimator: DL)
## 
## tau^2 (estimated amount of residual heterogeneity):     1.0136 (SE = 0.1813)
## tau (square root of estimated tau^2 value):             1.0068
## I^2 (residual heterogeneity / unaccounted variability): 94.59%
## H^2 (unaccounted variability / sampling variability):   18.48
## R^2 (amount of heterogeneity accounted for):            0.00%
## 
## Test for Residual Heterogeneity:
## QE(df = 118) = 2180.4655, p-val < .0001
## 
## Test of Moderators (coefficients 2:8):
## QM(df = 7) = 18.4873, p-val = 0.0100
## 
## Model Results:
## 
##                                    estimate      se     zval    pval    ci.lb 
## intrcpt                              0.2417  0.2587   0.9343  0.3501  -0.2653 
## blockerAtracurium                   -0.0612  0.3701  -0.1652  0.8687  -0.7866 
## blockerCisatracurium                -0.1171  0.4327  -0.2707  0.7866  -0.9652 
## blockerRocuronium                    0.0298  0.3023   0.0985  0.9216  -0.5628 
## blockerRocuronium/Vecuronium         0.1498  1.0660   0.1405  0.8883  -1.9396 
## blockerSuccinylcholine              -0.6772  0.3735  -1.8131  0.0698  -1.4092 
## blockerSuccinylcholine/Rocuronium   -0.1428  1.0525  -0.1356  0.8921  -2.2056 
## blockerVecuronium                    0.8110  0.3479   2.3310  0.0198   0.1291 
##                                     ci.ub 
## intrcpt                            0.7487    
## blockerAtracurium                  0.6643    
## blockerCisatracurium               0.7309    
## blockerRocuronium                  0.6223    
## blockerRocuronium/Vecuronium       2.2391    
## blockerSuccinylcholine             0.0549  . 
## blockerSuccinylcholine/Rocuronium  1.9201    
## blockerVecuronium                  1.4929  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Multiple Meta-regression for time for intubation
str(time)
## 'data.frame':    126 obs. of  20 variables:
##  $ id          : int  2 3 3 4 4 5 7 8 11 12 ...
##  $ author      : Factor w/ 95 levels "Abdallah","Abdelgalel",..: 1 2 2 4 3 5 6 6 7 8 ...
##  $ year        : int  2019 2018 2018 2015 2015 2019 2016 2015 2015 2017 ...
##  $ population  : Factor w/ 6 levels "","Elderly","General",..: 3 3 1 3 1 3 4 3 4 5 ...
##  $ predicted   : Factor w/ 3 levels "","Difficult",..: 3 2 1 3 1 3 2 3 2 2 ...
##  $ set         : Factor w/ 3 levels "Emergency department",..: 3 2 2 3 3 3 3 3 3 3 ...
##  $ nature      : Factor w/ 3 levels "","Elective",..: 2 3 3 2 1 2 2 2 2 2 ...
##  $ technique   : Factor w/ 3 levels "","Rapid Sequence Induction",..: 3 2 1 3 1 3 1 3 3 3 ...
##  $ experience  : Factor w/ 3 levels "","Experienced",..: 2 2 2 2 2 2 2 2 2 1 ...
##  $ intervention: Factor w/ 15 levels "Airtraq","C-MAC D",..: 1 4 1 14 14 3 13 4 3 4 ...
##  $ reference   : Factor w/ 1 level "Macintosh": 1 1 1 1 1 1 1 1 1 1 ...
##  $ inducer     : Factor w/ 8 levels "","Etomidate",..: 4 5 5 4 4 4 1 4 4 8 ...
##  $ opioid      : Factor w/ 6 levels "","Fentanyl",..: 2 2 2 2 2 1 1 1 2 2 ...
##  $ blocker     : Factor w/ 8 levels "","Atracurium",..: 3 4 4 4 4 2 1 4 4 2 ...
##  $ mtime.1     : num  11.5 31.4 32.2 19.1 19.1 ...
##  $ sdtime.1    : num  4.36 12.17 11.96 2.4 3.8 ...
##  $ ftime.1     : int  35 40 40 20 20 50 78 25 45 34 ...
##  $ mtime.2     : num  14.2 28.8 28.8 26.1 27.8 ...
##  $ sdtime.2    : num  3.42 10.27 10.27 3.2 4.2 ...
##  $ ftime.2     : int  35 20 20 20 20 50 96 25 45 34 ...
model_time<-metafor::rma.uni(m1i = mtime.1,sd1i = sdtime.1,n1i = ftime.1,m2i = mtime.2,sd2i = sdtime.2,n2i = ftime.2,data = time,method = "ML", measure = "SMD", mods = ~experience+technique+population+set+nature+inducer+opioid+blocker,test = "knha")
## Warning in metafor::rma.uni(m1i = mtime.1, sd1i = sdtime.1, n1i = ftime.1, :
## Redundant predictors dropped from the model.
model_time
## 
## Mixed-Effects Model (k = 126; tau^2 estimator: ML)
## 
## tau^2 (estimated amount of residual heterogeneity):     1.4802 (SE = 0.1994)
## tau (square root of estimated tau^2 value):             1.2166
## I^2 (residual heterogeneity / unaccounted variability): 95.75%
## H^2 (unaccounted variability / sampling variability):   23.55
## R^2 (amount of heterogeneity accounted for):            23.71%
## 
## Test for Residual Heterogeneity:
## QE(df = 95) = 1754.3477, p-val < .0001
## 
## Test of Moderators (coefficients 2:31):
## F(df1 = 30, df2 = 95) = 0.8896, p-val = 0.6322
## 
## Model Results:
## 
##                                    estimate      se     tval    pval    ci.lb 
## intrcpt                              0.6181  2.0799   0.2972  0.7670  -3.5111 
## experienceExperienced                0.4609  0.4516   1.0206  0.3100  -0.4356 
## experienceNon-experienced            0.6892  0.5184   1.3295  0.1869  -0.3400 
## techniqueRapid Sequence Induction   -0.3293  1.3368  -0.2463  0.8059  -2.9831 
## techniqueRegular                    -0.4714  0.8371  -0.5631  0.5747  -2.1332 
## populationElderly                   -0.0726  1.5254  -0.0476  0.9621  -3.1009 
## populationGeneral                   -1.2010  1.0549  -1.1384  0.2578  -3.2953 
## populationNeck Immobilization       -0.8901  1.1706  -0.7603  0.4489  -3.2141 
## populationObese                     -1.3164  1.2492  -1.0537  0.2947  -3.7964 
## populationPregnant Women            -0.6677  1.1851  -0.5634  0.5745  -3.0205 
## setICU                              -0.1433  2.4159  -0.0593  0.9528  -4.9396 
## setOperating Room                   -0.2640  2.0434  -0.1292  0.8975  -4.3207 
## natureElective                       0.7036  0.9450   0.7445  0.4584  -1.1724 
## inducerEtomidate                     0.9619  1.9255   0.4996  0.6185  -2.8608 
## inducerMidazolam                     1.2920  1.5954   0.8099  0.4200  -1.8752 
## inducerPropofol                      0.6176  1.0467   0.5900  0.5566  -1.4605 
## inducerPropofol/Ketamine             0.7001  1.6768   0.4175  0.6773  -2.6288 
## inducerPropofol/Midazolam            1.0104  1.2463   0.8107  0.4196  -1.4639 
## inducerPropofol/Thiopental           0.6759  2.1897   0.3087  0.7583  -3.6713 
## inducerThipental                     1.5161  1.2606   1.2027  0.2321  -0.9864 
## opioidFentanyl                      -0.1257  0.4509  -0.2788  0.7810  -1.0209 
## opioidFentanyl/Sulfentanil           1.0492  1.6373   0.6409  0.5232  -2.2011 
## opioidMorphine                      -0.5805  1.6141  -0.3596  0.7199  -3.7850 
## opioidRemifentanil                   0.4541  0.6280   0.7231  0.4714  -0.7926 
## opioidSulfentanil                   -0.3809  0.7647  -0.4981  0.6196  -1.8991 
## blockerAtracurium                   -0.6085  0.8018  -0.7589  0.4498  -2.2002 
## blockerCisatracurium                -0.3206  0.8841  -0.3627  0.7177  -2.0757 
## blockerRocuronium                   -0.4830  0.7474  -0.6463  0.5196  -1.9667 
## blockerRocuronium/Vecuronium        -0.1749  1.6984  -0.1030  0.9182  -3.5466 
## blockerSuccinylcholine              -1.1601  0.8510  -1.3631  0.1761  -2.8496 
## blockerVecuronium                    0.5048  0.7643   0.6605  0.5105  -1.0124 
##                                     ci.ub 
## intrcpt                            4.7473    
## experienceExperienced              1.3575    
## experienceNon-experienced          1.7184    
## techniqueRapid Sequence Induction  2.3245    
## techniqueRegular                   1.1904    
## populationElderly                  2.9556    
## populationGeneral                  0.8933    
## populationNeck Immobilization      1.4339    
## populationObese                    1.1637    
## populationPregnant Women           1.6851    
## setICU                             4.6529    
## setOperating Room                  3.7926    
## natureElective                     2.5796    
## inducerEtomidate                   4.7846    
## inducerMidazolam                   4.4592    
## inducerPropofol                    2.6956    
## inducerPropofol/Ketamine           4.0289    
## inducerPropofol/Midazolam          3.4847    
## inducerPropofol/Thiopental         5.0231    
## inducerThipental                   4.0186    
## opioidFentanyl                     0.7695    
## opioidFentanyl/Sulfentanil         4.2996    
## opioidMorphine                     2.6239    
## opioidRemifentanil                 1.7009    
## opioidSulfentanil                  1.1373    
## blockerAtracurium                  0.9832    
## blockerCisatracurium               1.4345    
## blockerRocuronium                  1.0007    
## blockerRocuronium/Vecuronium       3.1968    
## blockerSuccinylcholine             0.5294    
## blockerVecuronium                  2.0221    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
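
Individual blocks of coefficients in the multiple meta-regression can also be tested jointly with a Wald-type test; the sketch below tests the two experience coefficients (btt = 2:3 indexes the coefficient order shown in the model output above). Output not shown.

#Joint test of the experience coefficients in the multiple meta-regression (sketch)
anova(model_time, btt = 2:3)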
metafor::permutest(model_time)
## Running 1000 iterations for approximate permutation test.
  |====================================                                  |  51%
  |                                                                            
  |====================================                                  |  52%
  |                                                                            
  |=====================================                                 |  52%
  |                                                                            
  |=====================================                                 |  53%
  |                                                                            
  |=====================================                                 |  54%
  |                                                                            
  |======================================                                |  54%
  |                                                                            
  |======================================                                |  55%
  |                                                                            
  |=======================================                               |  55%
  |                                                                            
  |=======================================                               |  56%
  |                                                                            
  |========================================                              |  56%
  |                                                                            
  |========================================                              |  57%
  |                                                                            
  |========================================                              |  58%
  |                                                                            
  |=========================================                             |  58%
  |                                                                            
  |=========================================                             |  59%
  |                                                                            
  |==========================================                            |  59%
  |                                                                            
  |==========================================                            |  60%
  |                                                                            
  |==========================================                            |  61%
  |                                                                            
  |===========================================                           |  61%
  |                                                                            
  |===========================================                           |  62%
  |                                                                            
  |============================================                          |  62%
  |                                                                            
  |============================================                          |  63%
  |                                                                            
  |============================================                          |  64%
  |                                                                            
  |=============================================                         |  64%
  |                                                                            
  |=============================================                         |  65%
  |                                                                            
  |==============================================                        |  65%
  |                                                                            
  |==============================================                        |  66%
  |                                                                            
  |===============================================                       |  66%
  |                                                                            
  |===============================================                       |  67%
  |                                                                            
  |===============================================                       |  68%
  |                                                                            
  |================================================                      |  68%
  |                                                                            
  |================================================                      |  69%
  |                                                                            
  |=================================================                     |  69%
  |                                                                            
  |=================================================                     |  70%
  |                                                                            
  |=================================================                     |  71%
  |                                                                            
  |==================================================                    |  71%
  |                                                                            
  |==================================================                    |  72%
  |                                                                            
  |===================================================                   |  72%
  |                                                                            
  |===================================================                   |  73%
  |                                                                            
  |===================================================                   |  74%
  |                                                                            
  |====================================================                  |  74%
  |                                                                            
  |====================================================                  |  75%
  |                                                                            
  |=====================================================                 |  75%
  |                                                                            
  |=====================================================                 |  76%
  |                                                                            
  |======================================================                |  76%
  |                                                                            
  |======================================================                |  77%
  |                                                                            
  |======================================================                |  78%
  |                                                                            
  |=======================================================               |  78%
  |                                                                            
  |=======================================================               |  79%
  |                                                                            
  |========================================================              |  79%
  |                                                                            
  |========================================================              |  80%
  |                                                                            
  |========================================================              |  81%
  |                                                                            
  |=========================================================             |  81%
  |                                                                            
  |=========================================================             |  82%
  |                                                                            
  |==========================================================            |  82%
  |                                                                            
  |==========================================================            |  83%
  |                                                                            
  |==========================================================            |  84%
  |                                                                            
  |===========================================================           |  84%
  |                                                                            
  |===========================================================           |  85%
  |                                                                            
  |============================================================          |  85%
  |                                                                            
  |============================================================          |  86%
  |                                                                            
  |=============================================================         |  86%
  |                                                                            
  |=============================================================         |  87%
  |                                                                            
  |=============================================================         |  88%
  |                                                                            
  |==============================================================        |  88%
  |                                                                            
  |==============================================================        |  89%
  |                                                                            
  |===============================================================       |  89%
  |                                                                            
  |===============================================================       |  90%
  |                                                                            
  |===============================================================       |  91%
  |                                                                            
  |================================================================      |  91%
  |                                                                            
  |================================================================      |  92%
  |                                                                            
  |=================================================================     |  92%
  |                                                                            
  |=================================================================     |  93%
  |                                                                            
  |=================================================================     |  94%
  |                                                                            
  |==================================================================    |  94%
  |                                                                            
  |==================================================================    |  95%
  |                                                                            
  |===================================================================   |  95%
  |                                                                            
  |===================================================================   |  96%
  |                                                                            
  |====================================================================  |  96%
  |                                                                            
  |====================================================================  |  97%
  |                                                                            
  |====================================================================  |  98%
  |                                                                            
  |===================================================================== |  98%
  |                                                                            
  |===================================================================== |  99%
  |                                                                            
  |======================================================================|  99%
  |                                                                            
  |======================================================================| 100%
## 
## Test of Moderators (coefficients 2:31):
## F(df1 = 30, df2 = 95) = 0.8896, p-val* = 0.4760
## 
## Model Results:
## 
##                                    estimate      se     tval   pval*    ci.lb 
## intrcpt                              0.6181  2.0799   0.2972  0.7530  -3.5111 
## experienceExperienced                0.4609  0.4516   1.0206  0.2790  -0.4356 
## experienceNon-experienced            0.6892  0.5184   1.3295  0.1680  -0.3400 
## techniqueRapid Sequence Induction   -0.3293  1.3368  -0.2463  0.7850  -2.9831 
## techniqueRegular                    -0.4714  0.8371  -0.5631  0.5480  -2.1332 
## populationElderly                   -0.0726  1.5254  -0.0476  0.9630  -3.1009 
## populationGeneral                   -1.2010  1.0549  -1.1384  0.2390  -3.2953 
## populationNeck Immobilization       -0.8901  1.1706  -0.7603  0.4010  -3.2141 
## populationObese                     -1.3164  1.2492  -1.0537  0.2620  -3.7964 
## populationPregnant Women            -0.6677  1.1851  -0.5634  0.5560  -3.0205 
## setICU                              -0.1433  2.4159  -0.0593  0.9410  -4.9396 
## setOperating Room                   -0.2640  2.0434  -0.1292  0.8880  -4.3207 
## natureElective                       0.7036  0.9450   0.7445  0.4530  -1.1724 
## inducerEtomidate                     0.9619  1.9255   0.4996  0.5640  -2.8608 
## inducerMidazolam                     1.2920  1.5954   0.8099  0.3820  -1.8752 
## inducerPropofol                      0.6176  1.0467   0.5900  0.5440  -1.4605 
## inducerPropofol/Ketamine             0.7001  1.6768   0.4175  0.6480  -2.6288 
## inducerPropofol/Midazolam            1.0104  1.2463   0.8107  0.4180  -1.4639 
## inducerPropofol/Thiopental           0.6759  2.1897   0.3087  0.6900  -3.6713 
## inducerThipental                     1.5161  1.2606   1.2027  0.2180  -0.9864 
## opioidFentanyl                      -0.1257  0.4509  -0.2788  0.7560  -1.0209 
## opioidFentanyl/Sulfentanil           1.0492  1.6373   0.6409  0.4350  -2.2011 
## opioidMorphine                      -0.5805  1.6141  -0.3596  0.6410  -3.7850 
## opioidRemifentanil                   0.4541  0.6280   0.7231  0.4360  -0.7926 
## opioidSulfentanil                   -0.3809  0.7647  -0.4981  0.5990  -1.8991 
## blockerAtracurium                   -0.6085  0.8018  -0.7589  0.4450  -2.2002 
## blockerCisatracurium                -0.3206  0.8841  -0.3627  0.6710  -2.0757 
## blockerRocuronium                   -0.4830  0.7474  -0.6463  0.4960  -1.9667 
## blockerRocuronium/Vecuronium        -0.1749  1.6984  -0.1030  0.8940  -3.5466 
## blockerSuccinylcholine              -1.1601  0.8510  -1.3631  0.1550  -2.8496 
## blockerVecuronium                    0.5048  0.7643   0.6605  0.5090  -1.0124 
##                                     ci.ub 
## intrcpt                            4.7473    
## experienceExperienced              1.3575    
## experienceNon-experienced          1.7184    
## techniqueRapid Sequence Induction  2.3245    
## techniqueRegular                   1.1904    
## populationElderly                  2.9556    
## populationGeneral                  0.8933    
## populationNeck Immobilization      1.4339    
## populationObese                    1.1637    
## populationPregnant Women           1.6851    
## setICU                             4.6529    
## setOperating Room                  3.7926    
## natureElective                     2.5796    
## inducerEtomidate                   4.7846    
## inducerMidazolam                   4.4592    
## inducerPropofol                    2.6956    
## inducerPropofol/Ketamine           4.0289    
## inducerPropofol/Midazolam          3.4847    
## inducerPropofol/Thiopental         5.0231    
## inducerThipental                   4.0186    
## opioidFentanyl                     0.7695    
## opioidFentanyl/Sulfentanil         4.2996    
## opioidMorphine                     2.6239    
## opioidRemifentanil                 1.7009    
## opioidSulfentanil                  1.1373    
## blockerAtracurium                  0.9832    
## blockerCisatracurium               1.4345    
## blockerRocuronium                  1.0007    
## blockerRocuronium/Vecuronium       3.1968    
## blockerSuccinylcholine             0.5294    
## blockerVecuronium                  2.0221    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Funnel plot and small-study-effects tests for time for intubation
meta::funnel(mcont_time,ref.triangle=TRUE,contour.levels=c(0.9,0.95,0.99),col.contour=c("darkblue","blue","lightblue"))
meta::metabias(mcont_time, method.bias = "linreg")
## 
##  Linear regression test of funnel plot asymmetry
## 
## data:  mcont_time
## t = 3.1084, df = 124, p-value = 0.002333
## alternative hypothesis: asymmetry in funnel plot
## sample estimates:
##       bias    se.bias  intercept 
##  3.0190397  0.9712496 -0.6103264
dmetar::eggers.test(mcont_time)
##              Intercept ConfidenceInterval     t       p
## Egger's test     3.019        1.059-4.979 3.108 0.00233

trimfill_time<-meta::trimfill(mcont_time)
trimfill_time
##                                   SMD              95%-CI %W(random)
## Abdallah 2019                 -0.6764 [ -1.1589; -0.1939]        0.7
## Abdelgalel 2018                0.2258 [ -0.3126;  0.7642]        0.7
## Abdelgalel 2018                0.2978 [ -0.2418;  0.8374]        0.7
## Abdelgawad - Normotensos 2015 -2.4257 [ -3.2609; -1.5905]        0.6
## Abdelgawad - Hipertensos 2015 -2.1291 [ -2.9201; -1.3382]        0.6
## Aggarwal 2019                  5.0579 [  4.2423;  5.8735]        0.6
## Ahmad 2016                     4.0959 [  3.5679;  4.6238]        0.7
## Ahmad 2015                     0.5655 [ -0.0008;  1.1317]        0.7
## Akbar 2015                    -0.7636 [ -1.1923; -0.3350]        0.7
## Akbarzadeh 2017               -0.0143 [ -0.4897;  0.4610]        0.7
## Al - Ghamdi 2016               0.8207 [ -0.0656;  1.7070]        0.6
## Al - Ghamdi 2016               3.0833 [  1.8624;  4.3042]        0.6
## Al - Ghamdi 2016               1.3619 [  0.4723;  2.2516]        0.6
## Ali 2012                      -0.6936 [ -1.2658; -0.1214]        0.7
## Ali 2017                      -0.5621 [ -1.0788; -0.0455]        0.7
## Altun 2018                    -0.3879 [ -0.9295;  0.1536]        0.7
## Altun 2018                    -1.4170 [ -2.0144; -0.8195]        0.7
## Amini 2015                     0.8256 [  0.3364;  1.3148]        0.7
## Ander 2017                    -0.1410 [ -0.5854;  0.3034]        0.7
## Aoi 2010                       0.2745 [ -0.3822;  0.9313]        0.7
## Aqil 2016                     -0.6713 [ -1.1224; -0.2203]        0.7
## Aqil 2017                     -0.5322 [ -0.8695; -0.1950]        0.7
## Arici 2014                     1.2926 [  0.8086;  1.7767]        0.7
## Aziz 2012                      0.4704 [  0.2245;  0.7163]        0.7
## Bakshi - Nti 2015              1.0686 [  0.0930;  2.0441]        0.6
## Bakshi - Nti 2015              1.0686 [  0.0930;  2.0441]        0.6
## Bakshi - Nvl 2015              7.9734 [  5.1484; 10.7985]        0.3
## Bakshi - Nvl 2015              6.9883 [  4.4741;  9.5026]        0.3
## Bakshi - Exp 2015              5.1551 [  3.2018;  7.1083]        0.4
## Bakshi - Exp 2015              4.9935 [  3.0881;  6.8989]        0.4
## Bakshi 2019                    0.3914 [ -0.0720;  0.8549]        0.7
## Barak 2007                     0.7145 [  0.4037;  1.0253]        0.7
## Barman 2017                   -0.1143 [ -0.5832;  0.3546]        0.7
## Bashir 2020                   -0.4528 [ -0.8969; -0.0086]        0.7
## Colak 2019                     1.0554 [  0.6131;  1.4977]        0.7
## Kucukosman 2020                0.9848 [  0.4469;  1.5227]        0.7
## Wasem 2013                     0.1881 [ -0.3192;  0.6954]        0.7
## Yao 2015                       1.7045 [  1.2347;  2.1742]        0.7
## Bhalla 2018                    1.4217 [  0.6086;  2.2348]        0.6
## Bhandari 2013                 -2.7051 [ -3.3190; -2.0912]        0.7
## Bharti 2014                    0.6737 [  0.0263;  1.3210]        0.7
## Bhat 2015                     -1.0098 [ -1.4270; -0.5926]        0.7
## Bilehjani 2009                 0.9775 [  0.5064;  1.4486]        0.7
## Blajic 2019                    0.0000 [ -0.4445;  0.4445]        0.7
## Blajic 2019                   -0.4018 [ -0.8441;  0.0406]        0.7
## Cakir 2020                     0.6310 [  0.1201;  1.1419]        0.7
## Caparlar 2019                 -1.5320 [ -2.0398; -1.0242]        0.7
## Carlino 2009                   0.4265 [ -0.2985;  1.1514]        0.7
## Chalkeidis 2010                0.7792 [  0.2450;  1.3134]        0.7
## Choi 2011                     -0.0782 [ -0.5844;  0.4281]        0.7
## Colak 2015                     0.7694 [  0.2626;  1.2762]        0.7
## Colak 2015                     1.3797 [  0.8396;  1.9198]        0.7
## Dashti 2014                    1.2810 [  0.7173;  1.8447]        0.7
## Dhonneur 2008                 -2.7088 [ -3.0832; -2.3343]        0.7
## Di Marco 2011                 -0.7686 [ -1.1600; -0.3772]        0.7
## Erden 2010                     1.6399 [  0.8249;  2.4550]        0.6
## Goksu 2016                    -0.2230 [ -0.5440;  0.0981]        0.7
## Gunes 2020                     1.4455 [  1.1166;  1.7744]        0.7
## Gupta 2020                    -0.1654 [ -0.6045;  0.2736]        0.7
## Hamp 2015                      0.3828 [ -0.2704;  1.0359]        0.7
## Hirabayashi 2009              -0.7998 [ -0.9785; -0.6211]        0.7
## Hirabayashi 2009              -0.4875 [ -0.7689; -0.2062]        0.7
## Hirabayashi 2010              -0.1963 [ -0.4741;  0.0816]        0.7
## Hsu 2012                      -0.7473 [ -1.2719; -0.2226]        0.7
## Hu 2017                       -0.3505 [ -0.6328; -0.0683]        0.7
## Ilyas 2014                     0.5321 [  0.1794;  0.8849]        0.7
## Jungbauer 2009                -0.3395 [ -0.6187; -0.0602]        0.7
## Khan 2008                      0.2393 [ -0.3539;  0.8326]        0.7
## Kido 2015                     -0.6884 [ -1.2603; -0.1165]        0.7
## Kim 2013                      -0.8025 [ -1.4122; -0.1928]        0.7
## Koh 2010                      -0.9366 [ -1.5231; -0.3502]        0.7
## Kucukosman 2020                0.9848 [  0.4469;  1.5227]        0.7
## Kumar_2 2019                  -0.2532 [ -0.7615;  0.2550]        0.7
## Kunaz 2016                     0.9117 [  0.4991;  1.3243]        0.7
## Laosuwan 2015                  1.3395 [  0.3964;  2.2825]        0.6
## Lee 2013                       3.7949 [  2.7219;  4.8678]        0.6
## Lim 2005                      -0.6018 [ -1.1200; -0.0836]        0.7
## Liu 2014                      -1.5736 [ -2.0782; -1.0690]        0.7
## Liu 2016                       0.1391 [ -0.1559;  0.4341]        0.7
## Liu 2019                      -0.0558 [ -0.2624;  0.1508]        0.7
## Maharaj 2006                  -0.0223 [ -0.5284;  0.4838]        0.7
## Maharaj 2007                  -0.7376 [ -1.3804; -0.0949]        0.7
## Maharaj 2008                  -4.4937 [ -5.7019; -3.2855]        0.6
## Mahmood 2015                  -0.9226 [ -1.4567; -0.3885]        0.7
## Malik 2008                     1.5112 [  0.7150;  2.3073]        0.6
## Malik 2008                     1.1925 [  0.4257;  1.9593]        0.7
## Malik 2008                     0.6892 [ -0.0440;  1.4223]        0.7
## Maruyama 2008                  0.9466 [  0.0945;  1.7987]        0.6
## Myunghun-Kim 2017              0.9853 [  0.3251;  1.6455]        0.7
## Inangil 2018                  -0.2170 [ -0.6870;  0.2530]        0.7
## Jafra 2018                     0.8912 [  0.6003;  1.1820]        0.7
## Ndoko 2008                    -1.6035 [ -2.0433; -1.1638]        0.7
## Nishiyama 2011                 0.4487 [ -0.2332;  1.1305]        0.7
## Nishiyama 2011                 1.4753 [  0.7455;  2.2050]        0.7
## Nishiyama 2011                 2.8038 [  1.9266;  3.6811]        0.6
## Pappu 2020                     1.0982 [  0.4344;  1.7619]        0.7
## Pappu 2020                     0.0881 [ -0.5320;  0.7082]        0.7
## Parasa 2016                    1.9667 [  1.3434;  2.5901]        0.7
## Pazur 2016                    -0.0433 [ -0.5870;  0.5004]        0.7
## Peirovifar 2014               -1.6773 [ -2.4080; -0.9465]        0.7
## Pournajafian 2014              1.4975 [  1.0401;  1.9549]        0.7
## Ranieri 2012                  -1.4154 [ -1.7983; -1.0326]        0.7
## Reena 2019                    -0.9104 [ -1.3230; -0.4978]        0.7
## Shah 2016                     -0.3297 [ -0.8438;  0.1844]        0.7
## Shukla 2017                   -0.5891 [ -1.0373; -0.1410]        0.7
## Sulser 2016                    0.0989 [ -0.2246;  0.4224]        0.7
## Sun 2005                       1.1310 [  0.8321;  1.4300]        0.7
## Taylor 2013                    0.8800 [  0.4415;  1.3185]        0.7
## Teoh 2010                     -0.1486 [ -0.5425;  0.2453]        0.7
## Teoh 2010                      0.5652 [  0.1657;  0.9647]        0.7
## Teoh 2010                      0.5968 [  0.2010;  0.9926]        0.7
## Toker 2019                    -1.0109 [ -1.4281; -0.5936]        0.7
## Tolon 2012                    -0.8071 [ -1.4543; -0.1599]        0.7
## Tsan 2020                      0.6173 [  0.2755;  0.9590]        0.7
## Yumul 2016                    -0.6805 [ -1.4132;  0.0522]        0.7
## Yumul 2016                    -0.0270 [ -0.7427;  0.6887]        0.7
## Yumul 2016                    -0.2275 [ -0.9203;  0.4652]        0.7
## El-Tahan 2017                  1.7259 [  0.8548;  2.5970]        0.6
## Enomoto 2008                   0.1524 [ -0.1232;  0.4281]        0.7
## Maruyama 2008                  0.6037 [ -0.2549;  1.4624]        0.6
## Turkstra 2005                  0.9339 [ -0.0524;  1.9202]        0.6
## Zhao 2014                     -1.2951 [ -1.6491; -0.9411]        0.7
## Serocki 2013                   0.7451 [  0.1251;  1.3650]        0.7
## Serocki 2013                   0.6208 [  0.0034;  1.2382]        0.7
## Arora 2013                     0.3881 [  0.0072;  0.7690]        0.7
## Avula 2019                     1.9217 [  1.3031;  2.5402]        0.7
## Filled: Malik 2008            -1.4867 [ -2.2535; -0.7199]        0.7
## Filled: Dashti 2014           -1.5752 [ -2.1389; -1.0115]        0.7
## Filled: Arici 2014            -1.5868 [ -2.0708; -1.1028]        0.7
## Filled: Laosuwan 2015         -1.6336 [ -2.5766; -0.6906]        0.6
## Filled: Al - Ghamdi 2016      -1.6561 [ -2.5458; -0.7664]        0.6
## Filled: Colak 2015            -1.6739 [ -2.2140; -1.1338]        0.7
## Filled: Bhalla 2018           -1.7159 [ -2.5290; -0.9027]        0.6
## Filled: Gunes 2020            -1.7397 [ -2.0685; -1.4108]        0.7
## Filled: Nishiyama 2011        -1.7694 [ -2.4992; -1.0397]        0.7
## Filled: Pournajafian 2014     -1.7917 [ -2.2491; -1.3343]        0.7
## Filled: Malik 2008            -1.8054 [ -2.6015; -1.0092]        0.6
## Filled: Erden 2010            -1.9341 [ -2.7491; -1.1191]        0.6
## Filled: Yao 2015              -1.9987 [ -2.4684; -1.5289]        0.7
## Filled: El-Tahan 2017         -2.0201 [ -2.8912; -1.1490]        0.6
## Filled: Avula 2019            -2.2158 [ -2.8344; -1.5973]        0.7
## Filled: Parasa 2016           -2.2609 [ -2.8843; -1.6375]        0.7
## Filled: Nishiyama 2011        -3.0980 [ -3.9753; -2.2208]        0.6
## Filled: Al - Ghamdi 2016      -3.3774 [ -4.5984; -2.1565]        0.6
## Filled: Lee 2013              -4.0891 [ -5.1620; -3.0161]        0.6
## Filled: Ahmad 2016            -4.3901 [ -4.9180; -3.8621]        0.7
## Filled: Bakshi - Exp 2015     -5.2877 [ -7.1931; -3.3823]        0.4
## Filled: Aggarwal 2019         -5.3521 [ -6.1677; -4.5365]        0.6
## Filled: Bakshi - Exp 2015     -5.4493 [ -7.4025; -3.4960]        0.4
## Filled: Bakshi - Nvl 2015     -7.2825 [ -9.7968; -4.7683]        0.3
## Filled: Bakshi - Nvl 2015     -8.2676 [-11.0927; -5.4426]        0.3
## 
## Number of studies combined: k = 151 (with 25 added studies)
## 
##                          SMD            95%-CI     z p-value
## Random effects model -0.1569 [-0.3545; 0.0407] -1.56  0.1196
## Prediction interval          [-2.5042; 2.1904]              
## 
## Quantifying heterogeneity:
##  tau^2 = 1.4009 [1.7375; 2.8919]; tau = 1.1836 [1.3182; 1.7006];
##  I^2 = 95.8% [95.4%; 96.2%]; H = 4.89 [4.67; 5.12]
## 
## Test of heterogeneity:
##        Q d.f. p-value
##  3581.79  150       0
## 
## Details on meta-analytical method:
## - Inverse variance method
## - DerSimonian-Laird estimator for tau^2
## - Jackson method for confidence interval of tau^2 and tau
## - Trim-and-fill method to adjust for funnel plot asymmetry
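To see where the trim-and-fill procedure imputed the "Filled:" studies listed above, the adjusted object can be passed back to the funnel plot; a minimal sketch using meta's plotting method for trim-and-fill objects:

#Funnel plot of the trim-and-fill adjusted analysis,
#showing observed and imputed studies together
meta::funnel(trimfill_time)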

#Network meta-analyses: each device assessed individually

Network meta-analysis of failed intubation

net.fail<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Analysis Network Failed Intubation.2.csv")
net.fail.sens<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Tables Meta-regression/Table Meta-regression Network Failed Intubation.2.csv")
sum(net.fail$sampleSize)
## [1] 16478
table(net.fail$treatment)
## 
##                  Airtraq    Airtraq_Non_channeled                  APA_DAB 
##                       34                        1                        1 
##                  APA_MAC                    C_MAC                  C_MAC_D 
##                        2                       26                       10 
##                  CEL_100               Glidescope     Kingvision_Channeled 
##                        1                       51                       14 
## Kingvision_Non_channeled                Macintosh              McGrath_MAC 
##                        6                      121                       17 
##         McGrath_Series_5               Pentax_AWS                  Truview 
##                       20                       16                       12 
##              UESCOPE_MAC 
##                        2
id<-c("Airtraq","Airtraq_Non_channeled","APA_DAB","APA_MAC","C_MAC","C_MAC_D","CEL_100","Glidescope","Kingvision_Channeled","Kingvision_Non_channeled","Macintosh","McGrath_MAC","McGrath_Series_5","Pentax_AWS","Truview","UESCOPE_MAC")
description<-c("Airtraq","Airtraq Non-channeled","APA DAB","APA MAC","C-MAC","C-MAC D","CEL 100","Glidescope","KingVision Channeled","KingVision Non-channeled","Macintosh","McGrath MAC","McGrath Series 5", "Pentax AWS","Truview","UESCOPE MAC")
treat.codes.fail<-data.frame(id,description)
graph.fail<-gemtc::mtc.network(data.ab = net.fail,treatments = treat.codes.fail)
summary(graph.fail)
## $Description
## [1] "MTC dataset: Network"
## 
## $`Studies per treatment`
##                  Airtraq    Airtraq_Non_channeled                  APA_DAB 
##                       34                        1                        1 
##                  APA_MAC                    C_MAC                  C_MAC_D 
##                        2                       26                       10 
##                  CEL_100               Glidescope     Kingvision_Channeled 
##                        1                       51                       14 
## Kingvision_Non_channeled                Macintosh              McGrath_MAC 
##                        6                      121                       17 
##         McGrath_Series_5               Pentax_AWS                  Truview 
##                       20                       16                       12 
##              UESCOPE_MAC 
##                        2 
## 
## $`Number of n-arm studies`
## 2-arm 3-arm 4-arm 6-arm 
##   127    18     5     1 
## 
## $`Studies per treatment comparison`
##                          t1                       t2 nr
## 1                   Airtraq                  APA_DAB  1
## 2                   Airtraq                    C_MAC  2
## 3                   Airtraq                  C_MAC_D  1
## 4                   Airtraq               Glidescope  7
## 5                   Airtraq     Kingvision_Channeled  3
## 6                   Airtraq Kingvision_Non_channeled  1
## 7                   Airtraq                Macintosh 25
## 8                   Airtraq              McGrath_MAC  1
## 9                   Airtraq         McGrath_Series_5  2
## 10                  Airtraq               Pentax_AWS  1
## 11    Airtraq_Non_channeled                  APA_MAC  1
## 12    Airtraq_Non_channeled Kingvision_Non_channeled  1
## 13    Airtraq_Non_channeled                Macintosh  1
## 14                  APA_DAB                  C_MAC_D  1
## 15                  APA_DAB               Glidescope  1
## 16                  APA_DAB     Kingvision_Channeled  1
## 17                  APA_DAB              McGrath_MAC  1
## 18                  APA_MAC                    C_MAC  1
## 19                  APA_MAC     Kingvision_Channeled  1
## 20                  APA_MAC Kingvision_Non_channeled  1
## 21                  APA_MAC                Macintosh  1
## 22                    C_MAC                  C_MAC_D  1
## 23                    C_MAC               Glidescope  5
## 24                    C_MAC     Kingvision_Channeled  2
## 25                    C_MAC Kingvision_Non_channeled  1
## 26                    C_MAC                Macintosh 18
## 27                    C_MAC              McGrath_MAC  2
## 28                    C_MAC         McGrath_Series_5  3
## 29                    C_MAC               Pentax_AWS  1
## 30                  C_MAC_D               Glidescope  4
## 31                  C_MAC_D     Kingvision_Channeled  2
## 32                  C_MAC_D Kingvision_Non_channeled  1
## 33                  C_MAC_D                Macintosh  6
## 34                  C_MAC_D              McGrath_MAC  1
## 35                  CEL_100                Macintosh  1
## 36               Glidescope     Kingvision_Channeled  4
## 37               Glidescope Kingvision_Non_channeled  3
## 38               Glidescope                Macintosh 34
## 39               Glidescope              McGrath_MAC  4
## 40               Glidescope         McGrath_Series_5  4
## 41               Glidescope               Pentax_AWS  5
## 42               Glidescope                  Truview  1
## 43     Kingvision_Channeled Kingvision_Non_channeled  1
## 44     Kingvision_Channeled                Macintosh  7
## 45     Kingvision_Channeled              McGrath_MAC  2
## 46 Kingvision_Non_channeled                Macintosh  3
## 47                Macintosh              McGrath_MAC 10
## 48                Macintosh         McGrath_Series_5 13
## 49                Macintosh               Pentax_AWS 12
## 50                Macintosh                  Truview 11
## 51                Macintosh              UESCOPE_MAC  2
## 52              McGrath_MAC               Pentax_AWS  1
## 53              McGrath_MAC                  Truview  2
## 54         McGrath_Series_5                  Truview  3
## 55               Pentax_AWS                  Truview  1
plot(graph.fail,use.description = TRUE,vertex.shape="circle",vertex.color="white",vertex.size=15,vertex.label.color="dark blue",vertex.label.cex=1.0,edge.color="light blue")

#Random-effects binomial-logit model for the network, run over four chains
model.fail <- mtc.model(graph.fail,likelihood = "binom",link = "logit",linearModel = "random",n.chain = 4)
#Short pilot run (n.adapt = 50, 1,000 iterations); its convergence diagnostics
#are contrasted with the full run below
mcmc.fail.1 <- mtc.run(model.fail, n.adapt = 50, n.iter = 1000, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 334
##    Unobserved stochastic nodes: 350
##    Total graph size: 6678
## 
## Initializing model
## Warning in rjags::jags.model(file.model, data = syntax[["data"]], inits =
## syntax[["inits"]], : Adaptation incomplete
## NOTE: Stopping adaptation
summary(mcmc.fail.1)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 60:1050
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 100 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                        Mean     SD Naive SE Time-series SE
## d.Airtraq.APA_DAB                   3.63922 1.6996  0.08498        0.08433
## d.Airtraq.C_MAC                    -0.89236 0.9786  0.04893        0.08175
## d.Airtraq.C_MAC_D                  -1.21981 1.6004  0.08002        0.14582
## d.Airtraq.Glidescope               -0.00457 0.7715  0.03858        0.07567
## d.Airtraq.Kingvision_Channeled      2.39460 1.2658  0.06329        0.10129
## d.Airtraq.Kingvision_Non_channeled -0.63957 1.6647  0.08324        0.10346
## d.Airtraq.Macintosh                 1.53181 0.6713  0.03356        0.06490
## d.Airtraq.McGrath_MAC              -0.09946 1.1949  0.05974        0.08161
## d.Airtraq.McGrath_Series_5          1.10573 0.9166  0.04583        0.06668
## d.Airtraq.Pentax_AWS               -0.28566 1.1877  0.05938        0.07751
## d.Macintosh.Airtraq_Non_channeled  -1.65139 2.0049  0.10025        0.10909
## d.Macintosh.APA_MAC                -1.16331 1.5173  0.07586        0.09382
## d.Macintosh.CEL_100                -0.27487 2.3587  0.11793        0.11734
## d.Macintosh.Truview                -1.75566 1.3681  0.06841        0.09299
## d.Macintosh.UESCOPE_MAC            -0.07630 2.3058  0.11529        0.11490
## sd.d                                2.11969 0.4132  0.02066        0.04966
## 
## 2. Quantiles for each variable:
## 
##                                        2.5%     25%       50%     75% 97.5%
## d.Airtraq.APA_DAB                   0.63548  2.4849  3.522568  4.8205 6.979
## d.Airtraq.C_MAC                    -2.75237 -1.5557 -0.887212 -0.2819 1.104
## d.Airtraq.C_MAC_D                  -4.56669 -2.2011 -1.220613 -0.1060 1.692
## d.Airtraq.Glidescope               -1.62437 -0.5035 -0.025063  0.5252 1.454
## d.Airtraq.Kingvision_Channeled     -0.02408  1.5394  2.310897  3.1565 4.891
## d.Airtraq.Kingvision_Non_channeled -3.79499 -1.6905 -0.702757  0.4224 2.564
## d.Airtraq.Macintosh                 0.39847  1.0611  1.473264  1.9030 3.004
## d.Airtraq.McGrath_MAC              -2.20906 -0.8681 -0.088670  0.6343 2.490
## d.Airtraq.McGrath_Series_5         -0.66407  0.5060  1.083457  1.6951 2.918
## d.Airtraq.Pentax_AWS               -2.49897 -1.0113 -0.308606  0.4405 2.142
## d.Macintosh.Airtraq_Non_channeled  -5.54668 -2.9328 -1.794360 -0.3486 2.610
## d.Macintosh.APA_MAC                -4.26310 -2.1352 -1.046548 -0.1888 1.453
## d.Macintosh.CEL_100                -5.10705 -1.8066 -0.234467  1.2739 4.235
## d.Macintosh.Truview                -4.57036 -2.4863 -1.687842 -0.8945 0.666
## d.Macintosh.UESCOPE_MAC            -4.56897 -1.4080  0.001713  1.2343 5.132
## sd.d                                1.42408  1.8379  2.108180  2.3789 2.938
## 
## -- Model fit (residual deviance):
## 
##     Dbar       pD      DIC 
## 156.2592 114.2927 270.5519 
## 
## 334 data points, ratio 0.4678, I^2 = 0%
#Full run: 5,000 adaptation iterations and 100,000 sampling iterations per chain
mcmc.fail.2 <- mtc.run(model.fail, n.adapt = 5000, n.iter = 1e5, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 334
##    Unobserved stochastic nodes: 350
##    Total graph size: 6678
## 
## Initializing model
summary(mcmc.fail.2)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 5010:105000
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 10000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                       Mean     SD Naive SE Time-series SE
## d.Airtraq.APA_DAB                   3.7479 1.8668 0.009334       0.010781
## d.Airtraq.C_MAC                    -0.8545 0.9757 0.004879       0.008599
## d.Airtraq.C_MAC_D                  -1.1577 1.6729 0.008364       0.016412
## d.Airtraq.Glidescope                0.0433 0.8043 0.004021       0.006922
## d.Airtraq.Kingvision_Channeled      2.3537 1.2307 0.006154       0.010361
## d.Airtraq.Kingvision_Non_channeled -0.5915 1.6332 0.008166       0.010964
## d.Airtraq.Macintosh                 1.6209 0.6757 0.003379       0.006896
## d.Airtraq.McGrath_MAC              -0.2288 1.2874 0.006437       0.009615
## d.Airtraq.McGrath_Series_5          1.2035 0.9559 0.004779       0.007366
## d.Airtraq.Pentax_AWS               -0.1184 1.1756 0.005878       0.008456
## d.Macintosh.Airtraq_Non_channeled  -1.7849 2.0081 0.010041       0.010410
## d.Macintosh.APA_MAC                -1.2051 1.5516 0.007758       0.008603
## d.Macintosh.CEL_100                -0.5125 2.4487 0.012244       0.012696
## d.Macintosh.Truview                -1.6732 1.4227 0.007113       0.009735
## d.Macintosh.UESCOPE_MAC            -0.3739 2.2678 0.011339       0.011665
## sd.d                                2.1733 0.4442 0.002221       0.007649
## 
## 2. Quantiles for each variable:
## 
##                                        2.5%     25%      50%      75% 97.5%
## d.Airtraq.APA_DAB                   0.01197  2.5466  3.74631  4.94994 7.438
## d.Airtraq.C_MAC                    -2.80826 -1.4928 -0.84442 -0.21683 1.049
## d.Airtraq.C_MAC_D                  -4.58819 -2.2338 -1.11585 -0.03285 2.022
## d.Airtraq.Glidescope               -1.55567 -0.4884  0.04373  0.57440 1.621
## d.Airtraq.Kingvision_Channeled     -0.01972  1.5387  2.33160  3.13825 4.858
## d.Airtraq.Kingvision_Non_channeled -3.78719 -1.6509 -0.59984  0.45829 2.659
## d.Airtraq.Macintosh                 0.34434  1.1675  1.59895  2.05382 3.020
## d.Airtraq.McGrath_MAC              -2.78621 -1.0633 -0.22875  0.61022 2.294
## d.Airtraq.McGrath_Series_5         -0.63083  0.5655  1.17867  1.81408 3.164
## d.Airtraq.Pentax_AWS               -2.46603 -0.8763 -0.11219  0.65042 2.197
## d.Macintosh.Airtraq_Non_channeled  -5.79815 -3.0698 -1.77896 -0.48004 2.183
## d.Macintosh.APA_MAC                -4.30596 -2.2068 -1.18846 -0.19035 1.815
## d.Macintosh.CEL_100                -5.38404 -2.1183 -0.50014  1.08928 4.356
## d.Macintosh.Truview                -4.61159 -2.5811 -1.62887 -0.72683 1.030
## d.Macintosh.UESCOPE_MAC            -4.89833 -1.8385 -0.36883  1.08925 4.098
## sd.d                                1.43502  1.8608  2.12351  2.43740 3.184
## 
## -- Model fit (residual deviance):
## 
##     Dbar       pD      DIC 
## 155.1141 114.6662 269.7803 
## 
## 334 data points, ratio 0.4644, I^2 = 0%
gelman.plot(mcmc.fail.1)

gelman.plot(mcmc.fail.2)

gelman.diag(mcmc.fail.1)$mpsrf
## [1] 1.520214
gelman.diag(mcmc.fail.2)$mpsrf
## [1] 1.001702
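Beyond the Gelman-Rubin diagnostic, effective sample sizes give a per-parameter view of chain mixing; a minimal sketch, assuming the posterior chains of the mtc.run result are stored as a coda mcmc.list in its samples component, as gemtc does:

#Effective sample size for each monitored parameter (coda is loaded with gemtc)
coda::effectiveSize(mcmc.fail.2$samples)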
#Analysis of heterogeneity (ANOHE)
anohe.fail<-gemtc::mtc.anohe(graph.fail,n.adapt = 5000, n.iter = 1e5, thin = 10,sampler="rjags")
summary(anohe.fail)
#Node-splitting assessment of consistency between direct and indirect evidence
nodesplit.fail <- gemtc::mtc.nodesplit(graph.fail, linearModel = "random", likelihood = "binom",link = "logit",n.adapt = 5000, n.iter = 1e5, thin = 10)
summary(nodesplit.fail)
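The node-splitting results can also be displayed graphically; a minimal sketch using gemtc's plot method for the node-split summary, which draws the direct, indirect, and network estimates for each split comparison:

#Forest-style display of direct vs. indirect vs. network estimates per split node
plot(summary(nodesplit.fail))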
forest(gemtc::relative.effect(mcmc.fail.2, t1 = "Macintosh"), use.description = TRUE,xlim = c(-10, 10))

rank.probability.fail <- gemtc::rank.probability(mcmc.fail.2, preferredDirection = -1)
plot(rank.probability.fail, beside=TRUE)

sucra.fail <- dmetar::sucra(rank.probability.fail, lower.is.better = FALSE)
sucra.fail
##                               SUCRA
## C_MAC                    0.78208667
## C_MAC_D                  0.77702833
## Kingvision_Non_channeled 0.69846333
## McGrath_MAC              0.63851167
## Pentax_AWS               0.61944167
## Airtraq_Non_channeled    0.60594000
## Airtraq                  0.59794333
## Truview                  0.59634167
## Glidescope               0.58621167
## APA_MAC                  0.50382833
## CEL_100                  0.41209500
## UESCOPE_MAC              0.38663500
## McGrath_Series_5         0.32842833
## Macintosh                0.23219000
## Kingvision_Channeled     0.15677667
## APA_DAB                  0.07807833
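For a quick visual ranking, the SUCRA values can be plotted as a bar chart; a minimal sketch, assuming sucra.fail is a one-column data frame with treatments as row names, as printed above (ggplot2 is already loaded):

#Bar chart of SUCRA values, best-ranked device at the top
sucra.df<-data.frame(treatment=rownames(sucra.fail),SUCRA=sucra.fail$SUCRA)
ggplot(sucra.df,aes(x=reorder(treatment,SUCRA),y=SUCRA))+geom_col()+coord_flip()+labs(x=NULL,y="SUCRA")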
#Crude pooled proportion of failed intubation in the Macintosh arms.
#Note: treatment labels are case-sensitive, so "macintosh" matches no rows and
#the ratio evaluates to 0/0 = NaN; the correct label, "Macintosh", is used in
#the metaprop() call below.
sum(net.fail$responders[net.fail$treatment=="macintosh"])/sum(net.fail$sampleSize[net.fail$treatment=="macintosh"])
## [1] NaN
meta::metaprop(event = net.fail$responders[net.fail$treatment=="Macintosh"],n= net.fail$sampleSize[net.fail$treatment=="Macintosh"], studlab = net.fail$study[net.fail$treatment=="Macintosh"],data = net.fail[net.fail$treatment=="Macintosh",],method = "GLMM",sm = "PLOGIT",comb.fixed = FALSE,comb.random = TRUE,hakn = TRUE)
##                        proportion           95%-CI
## Abdallah 2011              0.0000 [0.0000; 0.0725]
## Abdallah 2019              0.0000 [0.0000; 0.1000]
## Abdelgalel 2018            0.0250 [0.0006; 0.1316]
## Agrawal 2020               0.0000 [0.0000; 0.0881]
## Ahmad 2016                 0.0000 [0.0000; 0.0377]
## Ahmad 2015                 0.0000 [0.0000; 0.1372]
## Akbar 2015                 0.0000 [0.0000; 0.0787]
## Al-Ghamdi 2016             0.0000 [0.0000; 0.1544]
## Ali 2012                   0.0000 [0.0000; 0.1372]
## Ali 2017                   0.0000 [0.0000; 0.1157]
## Altun 2018                 0.0698 [0.0146; 0.1906]
## Ander 2017                 0.1282 [0.0430; 0.2743]
## Andersen 2011              0.0400 [0.0049; 0.1371]
## Aoi 2010                   0.0556 [0.0014; 0.2729]
## Aqil 2016                  0.0000 [0.0000; 0.0881]
## Aqil 2017                  0.0000 [0.0000; 0.0513]
## Arici 2014                 0.0000 [0.0000; 0.0881]
## Arima 2014                 0.0000 [0.0000; 0.0672]
## Arslan 2017                0.0000 [0.0000; 0.0881]
## Aziz 2012                  0.1565 [0.1018; 0.2255]
## Bakshi 2015                0.1429 [0.0178; 0.4281]
## Bakshi_2 2015              0.0000 [0.0000; 0.2316]
## Bakshi_3 2015              0.0000 [0.0000; 0.2316]
## Bakshi 2019                0.0270 [0.0007; 0.1416]
## Barak 2007                 0.0000 [0.0000; 0.0402]
## Barman 2017                0.0000 [0.0000; 0.1000]
## Bashir 2020                0.0000 [0.0000; 0.0881]
## Colak 2019                 0.0000 [0.0000; 0.0787]
## El-Tahan 2018              0.0000 [0.0000; 0.1089]
## Huang 2020                 0.0000 [0.0000; 0.1157]
## Wasem 2013                 0.0000 [0.0000; 0.1157]
## Yao 2015                   0.0000 [0.0000; 0.0740]
## Bhalla 2018                0.0000 [0.0000; 0.2180]
## Bhandari 2013              0.0500 [0.0061; 0.1692]
## Bharti 2014                0.0000 [0.0000; 0.1765]
## Bhat 2015                  0.0000 [0.0000; 0.0711]
## Bilehjani 2009             0.0000 [0.0000; 0.0925]
## Blajic 2019                0.0000 [0.0000; 0.0606]
## Cakir 2020                 0.0000 [0.0000; 0.1122]
## Carlino 2009               0.0000 [0.0000; 0.2180]
## Chalkeidis 2010            0.0357 [0.0009; 0.1835]
## Chandrashekaraiah 2017     0.0333 [0.0008; 0.1722]
## Dey 2020                   0.0000 [0.0000; 0.0330]
## Dhonneur 2008              0.0094 [0.0002; 0.0514]
## Driver 2016                0.0842 [0.0371; 0.1592]
## Erden 2010                 0.0000 [0.0000; 0.2059]
## Erturk 2015                0.0000 [0.0000; 0.0881]
## Gao 2018                   0.0976 [0.0431; 0.1832]
## Griesdale 2012             0.0000 [0.0000; 0.1684]
## Gunes 2020                 0.0000 [0.0000; 0.0402]
## Gupta 2020                 0.0000 [0.0000; 0.0881]
## Hirabayashi 2009           0.0000 [0.0000; 0.0143]
## Hirabayashi 2010           0.0000 [0.0000; 0.0362]
## Hosalli 2017               0.0000 [0.0000; 0.1157]
## Hsu 2012                   0.0000 [0.0000; 0.1157]
## Hu 2017                    0.0105 [0.0003; 0.0573]
## Ilyas 2014                 0.0000 [0.0000; 0.0560]
## Jungbauer 2009             0.0800 [0.0352; 0.1516]
## Kaur 2020                  0.0000 [0.0000; 0.0881]
## Kido 2015                  0.0000 [0.0000; 0.1372]
## Kill 2013                  0.1000 [0.0211; 0.2653]
## Kim 2013                   0.0000 [0.0000; 0.1482]
## Kleine-Brueggeney 2017     0.4333 [0.3432; 0.5269]
## Koh 2010                   0.1600 [0.0454; 0.3608]
## Kucukosman 2020            0.0000 [0.0000; 0.1157]
## Laosuwan 2015              0.0000 [0.0000; 0.2849]
## Lascarrou 2017             0.0000 [0.0000; 0.0201]
## Lim 2005                   0.0000 [0.0000; 0.1157]
## Lin 2012                   0.0353 [0.0073; 0.0997]
## Liu 2014                   0.0000 [0.0000; 0.0881]
## Liu 2016                   0.0111 [0.0003; 0.0604]
## Macke 2020                 0.0000 [0.0000; 0.0474]
## Maharaj 2006               0.0000 [0.0000; 0.1157]
## Maharaj 2007               0.0500 [0.0013; 0.2487]
## Maharaj 2008               0.2000 [0.0573; 0.4366]
## Malik 2008                 0.0667 [0.0082; 0.2207]
## Malik_1 2009               0.1600 [0.0454; 0.3608]
## Malik_2 2009               0.0000 [0.0000; 0.1157]
## Maruyama 2008              0.0000 [0.0000; 0.2646]
## Mcelwain 2011              0.0645 [0.0079; 0.2142]
## Myunghun-Kim 2017          0.0000 [0.0000; 0.1684]
## Kulkarni 2013              0.0000 [0.0000; 0.1157]
## Inangil 2018               0.0571 [0.0070; 0.1916]
## Ing 2017                   0.0000 [0.0000; 0.2059]
## Jafra 2018                 0.0000 [0.0000; 0.0362]
## Ndoko 2008                 0.1132 [0.0427; 0.2303]
## Ninan 2016                 0.0000 [0.0000; 0.1157]
## Parasa 2016                0.0000 [0.0000; 0.1157]
## Pazur 2016                 0.0000 [0.0000; 0.1323]
## Peirovifar 2014            0.2500 [0.0866; 0.4910]
## Pournajafian 2014          0.0566 [0.0118; 0.1566]
## Ranieri 2012               0.0312 [0.0038; 0.1084]
## Reena 2019                 0.0000 [0.0000; 0.0711]
## Risse 2020                 0.0645 [0.0079; 0.2142]
## Ruetzeler 2020             0.0794 [0.0263; 0.1756]
## Sargin 2016                0.0000 [0.0000; 0.0711]
## Shah 2016                  0.0333 [0.0008; 0.1722]
## Shukla 2017                0.0500 [0.0061; 0.1692]
## Sulser 2016                0.0000 [0.0000; 0.0493]
## Sun 2005                   0.0100 [0.0003; 0.0545]
## Takenaka 2011              0.1471 [0.0495; 0.3106]
## Taylor 2013                0.4091 [0.2634; 0.5675]
## Teoh 2010                  0.0000 [0.0000; 0.0362]
## Tsan 2020                  0.0000 [0.0000; 0.0521]
## Varsha 2019                0.0000 [0.0000; 0.1000]
## Vijayakumar 2016           0.0000 [0.0000; 0.0787]
## Walker 2009                0.0000 [0.0000; 0.0596]
## Yoo 2018                   0.2273 [0.0782; 0.4537]
## Cavus 2011                 0.1200 [0.0453; 0.2431]
## El-Tahan 2017              0.0000 [0.0000; 0.2316]
## Enomoto 2008               0.1058 [0.0540; 0.1814]
## Paik 2020                  0.0000 [0.0000; 0.3085]
## Turkstra 2009              0.0000 [0.0000; 0.2471]
## Zhao 2014                  0.3333 [0.2286; 0.4517]
## Cordovani 2019             0.2500 [0.0866; 0.4910]
## Ferrando 2011              0.0000 [0.0000; 0.1157]
## Foulds 2016                0.2800 [0.1207; 0.4939]
## Serocki 2013               0.1250 [0.0351; 0.2899]
## Serocki 2010               0.1000 [0.0279; 0.2366]
## Arora 2013                 0.0000 [0.0000; 0.0660]
## Abdelgawad 2015            0.0000 [0.0000; 0.0881]
## 
## Number of studies combined: k = 121
## 
##                      proportion           95%-CI
## Random effects model     0.0079 [0.0041; 0.0152]
## 
## Quantifying heterogeneity:
##  tau^2 = 5.0174; tau = 2.2400; I^2 = 90.6%; H = 3.27
## 
## Test of heterogeneity:
##       Q d.f.  p-value             Test
##  206.98  120 < 0.0001        Wald-type
##  749.49  120 < 0.0001 Likelihood-Ratio
## 
## Details on meta-analytical method:
## - Random intercept logistic regression model
## - Maximum-likelihood estimator for tau^2
## - Hartung-Knapp adjustment for random effects model
## - Logit transformation
## - Clopper-Pearson confidence interval for individual studies
## - Continuity correction of 0.5 in studies with zero cell frequencies
##   (only used to calculate individual study results)
results.fail<-gemtc::relative.effect.table(mcmc.fail.2)
results.fail<-format(results.fail,digits=2)
write.csv2(results.fail,file = "results_fail.csv")
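If a file copy of the corresponding forest plot is wanted alongside the exported league table, the same forest call can be redirected to a PDF device; a minimal sketch (the file name is illustrative):

#Save the Macintosh-referenced forest plot to a PDF next to the CSV export
pdf("forest_fail_vs_macintosh.pdf",width = 8,height = 6)
forest(gemtc::relative.effect(mcmc.fail.2, t1 = "Macintosh"), use.description = TRUE,xlim = c(-10, 10))
dev.off()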

#Network meta-regression by setting
net.fail.mr.set<-net.fail.sens[,c(1,4)]
table(net.fail.mr.set$set)
## 
## Emergency department                  ICU             Multiple 
##                    2                    4                    1 
##       Operating Room      Out of Hospital 
##                  141                    3
net.fail.mr.set$set<-factor(net.fail.mr.set$set,labels=c("0","0","0","1","0"))
net.fail.mr.set$set<-as.numeric(net.fail.mr.set$set)
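#Sketch of a more explicit alternative: a plain 0/1 indicator for "Operating
#Room" avoids the factor-to-numeric detour above (which yields 1/2 codes);
#gemtc standardizes the covariate, so the shared coefficient B is unchanged and
#only the reported centering value shifts
#(assumes net.fail.sens$set still holds the original setting labels)
set.indicator<-as.numeric(net.fail.sens$set=="Operating Room")
table(set.indicator) #expect 10 zeros and 141 ones, matching the table above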
network.fail.mr<-gemtc::mtc.network(data.ab = net.fail,studies = net.fail.mr.set,treatments = treat.codes.fail)
regressor.fail<-list(coefficient="shared",variable="set",control="Macintosh")
model.fail.mr<-gemtc::mtc.model(network.fail.mr,type = "regression",regressor = regressor.fail)
mcmc.fail.mr.2 <- gemtc::mtc.run(model.fail.mr, n.adapt = 5000, n.iter = 1e5, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 334
##    Unobserved stochastic nodes: 351
##    Total graph size: 6844
## 
## Initializing model
nodesplit.fail.mr.2 <- gemtc::mtc.nodesplit(network.fail.mr, linearModel = "random", likelihood = "binom",link = "logit",n.adapt = 5000, n.iter = 1e5, thin = 10)
summary(mcmc.fail.mr.2)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 5010:105000
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 10000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                        Mean     SD Naive SE Time-series SE
## d.Airtraq.APA_DAB                   3.74010 1.8971 0.009486       0.010958
## d.Airtraq.C_MAC                    -0.95782 1.0035 0.005017       0.008996
## d.Airtraq.C_MAC_D                  -1.13711 1.6878 0.008439       0.016052
## d.Airtraq.Glidescope                0.04711 0.8133 0.004066       0.007036
## d.Airtraq.Kingvision_Channeled      2.36049 1.2429 0.006215       0.010026
## d.Airtraq.Kingvision_Non_channeled -0.55903 1.6521 0.008260       0.011296
## d.Airtraq.Macintosh                 1.60754 0.6855 0.003428       0.007439
## d.Airtraq.McGrath_MAC              -0.20095 1.2946 0.006473       0.009512
## d.Airtraq.McGrath_Series_5          1.24480 0.9732 0.004866       0.007742
## d.Airtraq.Pentax_AWS               -0.19657 1.1969 0.005985       0.009188
## d.Macintosh.Airtraq_Non_channeled  -1.73125 2.0287 0.010143       0.010959
## d.Macintosh.APA_MAC                -1.16570 1.5651 0.007825       0.008778
## d.Macintosh.CEL_100                -0.43168 2.4717 0.012358       0.012705
## d.Macintosh.Truview                -1.64915 1.4436 0.007218       0.009661
## d.Macintosh.UESCOPE_MAC            -1.61960 2.7737 0.013868       0.030247
## sd.d                                2.20379 0.4456 0.002228       0.007524
## B                                  -0.67594 0.8203 0.004101       0.013975
## 
## 2. Quantiles for each variable:
## 
##                                         2.5%     25%      50%      75%  97.5%
## d.Airtraq.APA_DAB                  -0.027163  2.5084  3.74363  4.97708 7.4992
## d.Airtraq.C_MAC                    -2.965283 -1.6088 -0.95140 -0.30594 1.0204
## d.Airtraq.C_MAC_D                  -4.607607 -2.2207 -1.08197 -0.01712 2.1004
## d.Airtraq.Glidescope               -1.566547 -0.4774  0.04596  0.57419 1.6602
## d.Airtraq.Kingvision_Channeled     -0.005526  1.5412  2.32752  3.14755 4.9215
## d.Airtraq.Kingvision_Non_channeled -3.806649 -1.6358 -0.57197  0.50776 2.7692
## d.Airtraq.Macintosh                 0.325025  1.1483  1.58393  2.04230 3.0163
## d.Airtraq.McGrath_MAC              -2.755827 -1.0535 -0.19900  0.64598 2.3540
## d.Airtraq.McGrath_Series_5         -0.621462  0.5946  1.22163  1.86596 3.2352
## d.Airtraq.Pentax_AWS               -2.591514 -0.9755 -0.18722  0.58905 2.1463
## d.Macintosh.Airtraq_Non_channeled  -5.780009 -3.0344 -1.71987 -0.43701 2.3001
## d.Macintosh.APA_MAC                -4.318784 -2.1810 -1.14629 -0.13717 1.8948
## d.Macintosh.CEL_100                -5.371075 -2.0204 -0.42356  1.16609 4.4907
## d.Macintosh.Truview                -4.593714 -2.5758 -1.60922 -0.69541 1.1119
## d.Macintosh.UESCOPE_MAC            -7.230251 -3.4058 -1.61380  0.19826 3.7742
## sd.d                                1.455780  1.8900  2.16338  2.46978 3.2083
## B                                  -2.305454 -1.2091 -0.67550 -0.13542 0.9314
## 
## -- Model fit (residual deviance):
## 
##     Dbar       pD      DIC 
## 154.7486 114.9914 269.7401 
## 
## 334 data points, ratio 0.4633, I^2 = 0%
## 
## -- Regression settings:
## 
## Regression on "set", shared coefficients, "Macintosh" as control
## Input standardized: x' = (set - 1.933775) / 0.4990056
## Estimates at the centering value: set = 1.933775
summary(mcmc.fail.2)$DIC
##        Dbar          pD         DIC data points 
##    155.1141    114.6662    269.7803    334.0000
summary(mcmc.fail.mr.2)$DIC
##        Dbar          pD         DIC data points 
##    154.7486    114.9914    269.7401    334.0000
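#Sketch: a compact DIC comparison; the covariate-adjusted model is only worth
#keeping if its DIC is meaningfully lower (informally, a drop of 3-5 points)
dic.base<-summary(mcmc.fail.2)$DIC["DIC"]
dic.mr<-summary(mcmc.fail.mr.2)$DIC["DIC"]
dic.base-dic.mr #here about 0.04, so the setting covariate adds essentially nothing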
forest(gemtc::relative.effect(mcmc.fail.mr.2, t1 = "Macintosh"), use.description = TRUE,xlim = c(-10, 10))

rank.probability.fail.mr <- gemtc::rank.probability(mcmc.fail.mr.2, preferredDirection = -1)
plot(rank.probability.fail.mr, beside=TRUE)

sucra.fail.set <- dmetar::sucra(rank.probability.fail.mr, lower.is.better = FALSE)
sucra.fail.set
##                               SUCRA
## C_MAC                    0.78518500
## C_MAC_D                  0.76212333
## Kingvision_Non_channeled 0.67952500
## Pentax_AWS               0.62240167
## McGrath_MAC              0.61931667
## Airtraq_Non_channeled    0.58890333
## Airtraq                  0.58419500
## Truview                  0.58212333
## Glidescope               0.57103833
## UESCOPE_MAC              0.55606667
## APA_MAC                  0.48831333
## CEL_100                  0.39433500
## McGrath_Series_5         0.31243167
## Macintosh                0.22710000
## Kingvision_Channeled     0.15062500
## APA_DAB                  0.07631667
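#Sketch: a quick bar chart of the SUCRA ranking with ggplot2 (assumes sucra()
#returns a one-column data frame with treatments as row names, as printed above)
sucra.df<-data.frame(treatment=rownames(sucra.fail.set),SUCRA=sucra.fail.set$SUCRA)
ggplot2::ggplot(sucra.df,ggplot2::aes(x=reorder(treatment,SUCRA),y=SUCRA))+
  ggplot2::geom_col()+
  ggplot2::coord_flip()+
  ggplot2::labs(x=NULL,y="SUCRA")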
#Network meta-regression by predicted difficult airway
net.fail.predicted<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Analysis Network Subgroup Predicted.csv")
net.fail.mr.predicted<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Tables Meta-regression/Table Meta-regression Network Subgroup Predicted.csv")
table(net.fail.predicted$treatment)
## 
##                  Airtraq    Airtraq_Non_channeled                  APA_DAB 
##                       32                        1                        1 
##                  APA_MAC                    C_MAC                  C_MAC_D 
##                        1                       22                       10 
##                  CEL_100               Glidescope     Kingvision_Channeled 
##                        1                       49                       12 
## Kingvision_Non_channeled                Macintosh              McGrath_MAC 
##                        6                      107                       17 
##         McGrath_Series_5               Pentax_AWS                  Truview 
##                       17                       15                        9
table(net.fail.mr.predicted$predicted)
## 
## Difficult      Easy 
##        59        77
id<-c("Airtraq","Airtraq_Non_channeled","APA_DAB","APA_MAC","C_MAC","C_MAC_D","CEL_100","Glidescope","Kingvision_Channeled","Kingvision_Non_channeled","Macintosh","McGrath_MAC","McGrath_Series_5","Pentax_AWS","Truview")
description<-c("Airtraq","Airtraq Non-channeled","APA DAB","APA MAC","C_MAC","C-MAC D","CEL 100","Glidescope","King Vision Channeled","King Vision Non-channeled","Macintosh","McGrath MAC","McGrath Series 5", "Pentax AWS","Truview")
treat.codes.fail.predicted<-data.frame(id,description)
net.fail.mr.predicted$predicted<-factor(net.fail.mr.predicted$predicted,labels = c(1,0))
net.fail.mr.predicted$predicted<-as.numeric(net.fail.mr.predicted$predicted)
network.fail.mr.predicted<-gemtc::mtc.network(data.ab = net.fail.predicted,studies = net.fail.mr.predicted,treatments = treat.codes.fail.predicted)
regressor.fail.predicted<-list(coefficient="shared",variable="predicted",control="Macintosh")
model.fail.mr.predicted<-gemtc::mtc.model(network.fail.mr.predicted,type = "regression",regressor = regressor.fail.predicted)
mcmc.fail.mr.2.predicted <- gemtc::mtc.run(model.fail.mr.predicted, n.adapt = 5000, n.iter = 1e5, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 300
##    Unobserved stochastic nodes: 316
##    Total graph size: 6144
## 
## Initializing model
nodesplit.fail.mr.2.predicted <- gemtc::mtc.nodesplit(network.fail.mr.predicted, linearModel = "random", likelihood = "binom",link = "logit",n.adapt = 5000, n.iter = 1e5, thin = 10)
summary(mcmc.fail.mr.2.predicted)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 5010:105000
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 10000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                        Mean     SD Naive SE Time-series SE
## d.Airtraq.APA_DAB                   3.63746 1.9412 0.009706       0.011745
## d.Airtraq.C_MAC                    -1.08041 1.0972 0.005486       0.010105
## d.Airtraq.C_MAC_D                  -1.20852 1.7324 0.008662       0.017833
## d.Airtraq.Glidescope                0.06241 0.8422 0.004211       0.007226
## d.Airtraq.Kingvision_Channeled      2.21575 1.4010 0.007005       0.012123
## d.Airtraq.Kingvision_Non_channeled -0.04944 1.7347 0.008674       0.012435
## d.Airtraq.Macintosh                 1.57487 0.7221 0.003611       0.008027
## d.Airtraq.McGrath_MAC              -0.52769 1.3455 0.006727       0.010574
## d.Airtraq.McGrath_Series_5          0.39142 1.0536 0.005268       0.007606
## d.Airtraq.Pentax_AWS                0.34078 1.2328 0.006164       0.009967
## d.Macintosh.Airtraq_Non_channeled  -0.82098 2.1799 0.010900       0.012814
## d.Macintosh.APA_MAC                 0.42381 2.1819 0.010909       0.012058
## d.Macintosh.CEL_100                -1.00884 2.5239 0.012620       0.014046
## d.Macintosh.Truview                 0.37234 2.1893 0.010946       0.018634
## sd.d                                2.22623 0.5063 0.002531       0.009494
## B                                   1.15789 0.8591 0.004295       0.013940
## 
## 2. Quantiles for each variable:
## 
##                                       2.5%     25%      50%      75% 97.5%
## d.Airtraq.APA_DAB                  -0.2292  2.3979  3.64060  4.87510 7.509
## d.Airtraq.C_MAC                    -3.2912 -1.7825 -1.07070 -0.35832 1.050
## d.Airtraq.C_MAC_D                  -4.7816 -2.2999 -1.15144 -0.06054 2.059
## d.Airtraq.Glidescope               -1.6105 -0.4874  0.06649  0.60871 1.733
## d.Airtraq.Kingvision_Channeled     -0.4160  1.2874  2.16077  3.08881 5.146
## d.Airtraq.Kingvision_Non_channeled -3.4263 -1.1829 -0.08804  1.06008 3.448
## d.Airtraq.Macintosh                 0.2278  1.0929  1.54665  2.03016 3.082
## d.Airtraq.McGrath_MAC              -3.2121 -1.3988 -0.51603  0.33651 2.130
## d.Airtraq.McGrath_Series_5         -1.6480 -0.3077  0.37455  1.07330 2.515
## d.Airtraq.Pentax_AWS               -2.0990 -0.4643  0.33424  1.14448 2.782
## d.Macintosh.Airtraq_Non_channeled  -5.1624 -2.2163 -0.82813  0.55338 3.561
## d.Macintosh.APA_MAC                -3.9218 -0.9646  0.40408  1.80005 4.827
## d.Macintosh.CEL_100                -6.0483 -2.6236 -0.99209  0.62132 4.000
## d.Macintosh.Truview                -4.0069 -1.0484  0.38620  1.79866 4.687
## sd.d                                1.3950  1.8672  2.16966  2.52434 3.392
## B                                  -0.4808  0.5875  1.13502  1.71234 2.906
## 
## -- Model fit (residual deviance):
## 
##     Dbar       pD      DIC 
## 138.8937 103.2662 242.1599 
## 
## 300 data points, ratio 0.463, I^2 = 0%
## 
## -- Regression settings:
## 
## Regression on "predicted", shared coefficients, "Macintosh" as control
## Input standardized: x' = (predicted - 1.566176) / 0.994867
## Estimates at the centering value: predicted = 1.566176
summary(mcmc.fail.mr.2.predicted)$DIC
##        Dbar          pD         DIC data points 
##    138.8937    103.2662    242.1599    300.0000
forest(gemtc::relative.effect(mcmc.fail.mr.2.predicted, t1 = "Macintosh"), use.description = TRUE,xlim = c(-10, 10))

rank.probability.fail.mr.predicted <- gemtc::rank.probability(mcmc.fail.mr.2.predicted, preferredDirection = -1)
plot(rank.probability.fail.mr.predicted, beside=TRUE)

sucra.fail.predicted <- dmetar::sucra(rank.probability.fail.mr.predicted, lower.is.better = FALSE)
sucra.fail.predicted
##                              SUCRA
## C_MAC                    0.8290768
## C_MAC_D                  0.7983250
## McGrath_MAC              0.7176321
## Kingvision_Non_channeled 0.6272375
## Airtraq                  0.6258893
## Glidescope               0.6121393
## Pentax_AWS               0.5497536
## McGrath_Series_5         0.5373286
## CEL_100                  0.5123839
## Airtraq_Non_channeled    0.4862964
## Truview                  0.3084786
## APA_MAC                  0.2940018
## Macintosh                0.2761214
## Kingvision_Channeled     0.2175125
## APA_DAB                  0.1078232
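#Sketch: put the two meta-regression SUCRA rankings side by side to check how
#stable the ordering is across covariates (assumes both objects use the
#row-name convention printed above; treatments absent from one network get NA)
sucra.compare<-merge(data.frame(treatment=rownames(sucra.fail.set),set=sucra.fail.set$SUCRA),data.frame(treatment=rownames(sucra.fail.predicted),predicted=sucra.fail.predicted$SUCRA),by="treatment",all=TRUE)
sucra.compare[order(-sucra.compare$set),]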
#Funnel Plot for failed intubation
funnel.fail<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Tables Funnel Plots/Funnel Network Fail.csv")
funnel.fail_analysis<-dplyr::filter(funnel.fail,funnel.fail$fint.e1>0 | funnel.fail$fint.e2>0)
mbin_funnel.fail_random<-meta::metabin(fint.e1,fint.t1,fint.e2,fint.t2,data = funnel.fail_analysis,studlab =study,comb.fixed = FALSE,comb.random = TRUE,method.tau = "SJ",hakn = TRUE,prediction = TRUE,incr = 0.5,sm = "RR")
mbin_funnel.fail_random
##                             RR             95%-CI %W(random)
## Abdallah 2011          10.7822 [0.6123; 189.8605]        1.1
## Abdelgalel 2018         0.1677 [0.0070;   4.0257]        1.0
## Ahmed 2017              0.3333 [0.0141;   7.8648]        1.0
## Altun 2018              0.3496 [0.0607;   2.0132]        1.9
## Alvis 2016              0.0627 [0.0037;   1.0528]        1.1
## Ander 2017              0.0909 [0.0052;   1.5894]        1.1
## Andersen 2011           0.2000 [0.0098;   4.0624]        1.0
## Aoi 2010                1.0000 [0.0676;  14.7865]        1.2
## Arima 2014              4.7345 [0.2326;  96.3642]        1.0
## Arslan 2015             0.3333 [0.0140;   7.9424]        1.0
## Aziz 2012               0.4718 [0.2387;   0.9327]        2.8
## Bakshi 2019             0.3333 [0.0140;   7.9235]        1.0
## Bakshi - Nti 2015       1.7500 [0.4169;   7.3460]        2.1
## Bakshi - Nvl 2015       5.5965 [0.3319;  94.3792]        1.1
## Belze 2017              2.5000 [0.5183;  12.0576]        2.0
## Bhandari 2013           0.2000 [0.0099;   4.0371]        1.1
## Brozek 2020            13.0000 [0.7503; 225.2533]        1.1
## Cavus 2018              1.0357 [0.0980;  10.9440]        1.4
## Cavus 2018              0.0918 [0.0130;   0.6477]        1.7
## Cavus 2011              0.0387 [0.0022;   0.6726]        1.1
## Chalkeidis 2010         3.2000 [0.3787;  27.0413]        1.6
## Chandrashekaraiah 2017  0.3333 [0.0141;   7.8648]        1.0
## Cordovani 2019          0.5000 [0.1359;   1.8393]        2.2
## Dhonneur 2008           0.3333 [0.0137;   8.0906]        1.0
## Driver 2016             0.2306 [0.0502;   1.0587]        2.0
## Enomoto 2008            0.0457 [0.0027;   0.7646]        1.1
## Erden 2010              3.0000 [0.1315;  68.4178]        1.0
## Ferrando 2011           3.0000 [0.1271;  70.7833]        1.0
## Foulds 2016             0.0694 [0.0042;   1.1508]        1.1
## Gao 2018                0.7593 [0.2757;   2.0909]        2.5
## Hu 2017                 0.3167 [0.0131;   7.6805]        1.0
## Ilyas 2014             11.0000 [0.6209; 194.8652]        1.1
## Inangil 2018            0.2000 [0.0100;   4.0192]        1.1
## Jungbauer 2009          0.1250 [0.0159;   0.9810]        1.6
## Kill 2013               0.1429 [0.0077;   2.6497]        1.1
## Kleine-Brueggeney 2016  2.4250 [1.2117;   4.8532]        2.8
## Kleine-Brueggeney 2017  0.4103 [0.3033;   0.5549]        3.0
## Koh 2010                0.2500 [0.0300;   2.0828]        1.6
## Lange 2009              0.2000 [0.0100;   3.9955]        1.1
## Lin 2012                0.6667 [0.1143;   3.8895]        1.8
## Liu 2009                0.1111 [0.0062;   1.9882]        1.1
## Liu 2016                2.0000 [0.1846;  21.6662]        1.4
## Liu 2019                0.0481 [0.0028;   0.8155]        1.1
## Maharaj 2007            0.3333 [0.0144;   7.7130]        1.0
## Maharaj 2008            0.1111 [0.0064;   1.9341]        1.1
## Malik 2008              0.5000 [0.0877;   2.8510]        1.9
## Malik1 2009             0.1250 [0.0147;   1.0604]        1.6
## Markham 2019            2.8333 [0.8571;   9.3663]        2.3
## Mcelwain 2011           0.2672 [0.0252;   2.8318]        1.4
## Myunghun-Kim 2017       3.0000 [0.1297;  69.4167]        1.0
## Ndoko 2008              0.0769 [0.0044;   1.3317]        1.1
## Ng 2012                 5.0000 [0.6006;  41.6277]        1.6
## Parasa 2016             2.9048 [0.1230;  68.5796]        1.0
## Peirovifar 2014         0.4000 [0.0876;   1.8256]        2.1
## Pournajafian 2014       2.0000 [0.5276;   7.5816]        2.2
## Ranieri 2012            0.1883 [0.0092;   3.8484]        1.0
## Raza 2017               0.5000 [0.0479;   5.2245]        1.4
## Risse 2020              0.1826 [0.0091;   3.6594]        1.1
## Ruetzeler 2020          0.3818 [0.0769;   1.8970]        2.0
## Serocki 2013            0.0569 [0.0032;   1.0243]        1.1
## Serocki 2010            0.2500 [0.0478;   1.3075]        1.9
## Shah 2016               0.3333 [0.0141;   7.8648]        1.0
## Shukla 2017             0.2000 [0.0099;   4.0371]        1.1
## Sun 2005                0.3333 [0.0137;   8.0852]        1.0
## Takenaka 2011           0.0883 [0.0051;   1.5379]        1.1
## Taylor 2013             0.0270 [0.0017;   0.4348]        1.2
## Walker 2009             3.0000 [0.1247;  72.1913]        1.0
## Wan 2016                2.0000 [0.1880;  21.2801]        1.4
## Yoo 2018                0.2000 [0.0254;   1.5756]        1.6
## Zhao 2014               0.3649 [0.1828;   0.7281]        2.8
## 
## Number of studies combined: k = 70
## 
##                          RR           95%-CI     t  p-value
## Random effects model 0.4890 [0.3493; 0.6846] -4.24 < 0.0001
## Prediction interval         [0.0518; 4.6195]               
## 
## Quantifying heterogeneity:
##  tau^2 = 1.2380 [0.1447; 1.4726]; tau = 1.1126 [0.3804; 1.2135];
##  I^2 = 41.1% [21.5%; 55.9%]; H = 1.30 [1.13; 1.51]
## 
## Test of heterogeneity:
##       Q d.f. p-value
##  117.19   69  0.0003
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
meta::funnel(mbin_funnel.fail_random,ref.triangle=TRUE,contour.levels=c(0.9,0.95,0.99),col.contour=c("darkblue","blue","lightblue"))

dmetar::eggers.test(mbin_funnel.fail_random)
##              Intercept ConfidenceInterval      t       p
## Egger's test     -0.12       -0.512-0.272 -0.487 0.62787
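#Sketch: trim-and-fill as a small-study sensitivity check on the same pairwise
#model (meta::trimfill is written out in full because metafor masks it here)
tf.fail<-meta::trimfill(mbin_funnel.fail_random)
summary(tf.fail)
meta::funnel(tf.fail) #funnel plot with the imputed studies filled in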

Network meta-analysis of failed first intubation attempt

net.first<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Analysis Network First.csv")
sum(net.first$sampleSize)
## [1] 16953
table(net.first$treatment)
## 
##                  Airtraq    Airtraq_Non_channeled                  APA_DAB 
##                       32                        1                        1 
##                  APA_MAC                  C_MAC_D                C_MAC_MAC 
##                        2                       13                       23 
##                  CEL_100               Glidescope            Imago_V_Blade 
##                        1                       52                        1 
##     Kingvision_Channeled Kingvision_Non_channeled                Macintosh 
##                       16                        6                      110 
##              McGrath_MAC         McGrath_Series_5               Pentax_AWS 
##                       18                       15                       15 
##        Pentax_AWS_Miller                  Tosight                  Truview 
##                        1                        1                       10 
##              UESCOPE_MAC 
##                        2
id<-c("Airtraq","Airtraq_Non_channeled","APA_DAB","APA_MAC","C_MAC_D","C_MAC_MAC","CEL_100","Glidescope","Imago_V_Blade","Kingvision_Channeled","Kingvision_Non_channeled","Macintosh","McGrath_MAC","McGrath_Series_5","Pentax_AWS","Pentax_AWS_Miller","Tosight","Truview","UESCOPE_MAC")
description<-c("Airtraq","Airtraq Non-channeled","APA DAB","APA MAC","C-MAC D","C-MAC","CEL 100","Glidescope","Imago V-Blade","King Vision Channeled","King Vision Non-channeled","Macintosh","McGrath MAC","McGrath Series 5", "Pentax AWS","Pentax AWS Miller","Tosight","Truview","UESCOPE MAC")
treat.codes.first<-data.frame(id,description)
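#Sketch: sanity-check that the hand-typed treatment codes and the data agree
#before building the network (both calls should return character(0))
setdiff(unique(net.first$treatment),treat.codes.first$id) #treatments with no code
setdiff(treat.codes.first$id,unique(net.first$treatment)) #codes with no data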
graph.first<-gemtc::mtc.network(data.ab = net.first,treatments = treat.codes.first)
summary(graph.first)
## $Description
## [1] "MTC dataset: Network"
## 
## $`Studies per treatment`
##                  Airtraq    Airtraq_Non_channeled                  APA_DAB 
##                       32                        1                        1 
##                  APA_MAC                  C_MAC_D                C_MAC_MAC 
##                        2                       13                       23 
##                  CEL_100               Glidescope            Imago_V_Blade 
##                        1                       52                        1 
##     Kingvision_Channeled Kingvision_Non_channeled                Macintosh 
##                       16                        6                      110 
##              McGrath_MAC         McGrath_Series_5               Pentax_AWS 
##                       18                       15                       15 
##        Pentax_AWS_Miller                  Tosight                  Truview 
##                        1                        1                       10 
##              UESCOPE_MAC 
##                        2 
## 
## $`Number of n-arm studies`
## 2-arm 3-arm 4-arm 6-arm 
##   120    14     8     1 
## 
## $`Studies per treatment comparison`
##                          t1                       t2 nr
## 1                   Airtraq                  APA_DAB  1
## 2                   Airtraq                  C_MAC_D  1
## 3                   Airtraq                C_MAC_MAC  2
## 4                   Airtraq               Glidescope  7
## 5                   Airtraq     Kingvision_Channeled  3
## 6                   Airtraq Kingvision_Non_channeled  1
## 7                   Airtraq                Macintosh 23
## 8                   Airtraq              McGrath_MAC  1
## 9                   Airtraq         McGrath_Series_5  2
## 10                  Airtraq               Pentax_AWS  2
## 11                  Airtraq        Pentax_AWS_Miller  1
## 12    Airtraq_Non_channeled                  APA_MAC  1
## 13    Airtraq_Non_channeled Kingvision_Non_channeled  1
## 14    Airtraq_Non_channeled                Macintosh  1
## 15                  APA_DAB                  C_MAC_D  1
## 16                  APA_DAB               Glidescope  1
## 17                  APA_DAB     Kingvision_Channeled  1
## 18                  APA_DAB              McGrath_MAC  1
## 19                  APA_MAC                C_MAC_MAC  1
## 20                  APA_MAC     Kingvision_Channeled  1
## 21                  APA_MAC Kingvision_Non_channeled  1
## 22                  APA_MAC                Macintosh  1
## 23                  C_MAC_D                C_MAC_MAC  1
## 24                  C_MAC_D               Glidescope  5
## 25                  C_MAC_D     Kingvision_Channeled  4
## 26                  C_MAC_D Kingvision_Non_channeled  1
## 27                  C_MAC_D                Macintosh  6
## 28                  C_MAC_D              McGrath_MAC  1
## 29                C_MAC_MAC               Glidescope  6
## 30                C_MAC_MAC     Kingvision_Channeled  2
## 31                C_MAC_MAC Kingvision_Non_channeled  1
## 32                C_MAC_MAC                Macintosh 16
## 33                C_MAC_MAC              McGrath_MAC  2
## 34                C_MAC_MAC         McGrath_Series_5  4
## 35                C_MAC_MAC               Pentax_AWS  1
## 36                  CEL_100                Macintosh  1
## 37               Glidescope            Imago_V_Blade  1
## 38               Glidescope     Kingvision_Channeled  4
## 39               Glidescope Kingvision_Non_channeled  3
## 40               Glidescope                Macintosh 34
## 41               Glidescope              McGrath_MAC  4
## 42               Glidescope         McGrath_Series_5  5
## 43               Glidescope               Pentax_AWS  5
## 44               Glidescope                  Truview  1
## 45     Kingvision_Channeled Kingvision_Non_channeled  1
## 46     Kingvision_Channeled                Macintosh  7
## 47     Kingvision_Channeled              McGrath_MAC  2
## 48 Kingvision_Non_channeled                Macintosh  3
## 49                Macintosh              McGrath_MAC 11
## 50                Macintosh         McGrath_Series_5  9
## 51                Macintosh               Pentax_AWS 11
## 52                Macintosh        Pentax_AWS_Miller  1
## 53                Macintosh                  Tosight  1
## 54                Macintosh                  Truview  9
## 55                Macintosh              UESCOPE_MAC  2
## 56              McGrath_MAC               Pentax_AWS  1
## 57              McGrath_MAC                  Truview  2
## 58               Pentax_AWS        Pentax_AWS_Miller  1
## 59               Pentax_AWS                  Truview  1
plot(graph.first,use.description = TRUE,vertex.shape="circle",vertex.color="white",vertex.size=15,vertex.label.color="dark blue",vertex.label.cex=1.0,edge.color="light blue")

model.first <- gemtc::mtc.model(graph.first,likelihood = "binom",link = "logit",linearModel = "random",n.chain = 4)
mcmc.first.1 <- gemtc::mtc.run(model.first, n.adapt = 50, n.iter = 1000, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 320
##    Unobserved stochastic nodes: 339
##    Total graph size: 6616
## 
## Initializing model
## Warning in rjags::jags.model(file.model, data = syntax[["data"]], inits =
## syntax[["inits"]], : Adaptation incomplete
## NOTE: Stopping adaptation
summary(mcmc.first.1)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 60:1050
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 100 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                           Mean     SD Naive SE Time-series SE
## d.Glidescope.Airtraq                  -0.46554 0.3190 0.015951       0.017894
## d.Glidescope.APA_DAB                   2.89970 1.0546 0.052732       0.048698
## d.Glidescope.C_MAC_D                  -0.08261 0.4900 0.024500       0.024541
## d.Glidescope.C_MAC_MAC                -0.51562 0.3551 0.017755       0.018773
## d.Glidescope.Imago_V_Blade             1.40218 1.6011 0.080057       0.084522
## d.Glidescope.Kingvision_Channeled      0.61165 0.4284 0.021421       0.021328
## d.Glidescope.Kingvision_Non_channeled -0.33626 0.5563 0.027814       0.024508
## d.Glidescope.Macintosh                 0.64415 0.2329 0.011643       0.012213
## d.Glidescope.McGrath_MAC              -0.17343 0.4312 0.021558       0.020621
## d.Glidescope.McGrath_Series_5          0.14090 0.4137 0.020687       0.022464
## d.Glidescope.Pentax_AWS                0.13555 0.4420 0.022100       0.023624
## d.Glidescope.Truview                   0.05290 0.5840 0.029199       0.027081
## d.Macintosh.Airtraq_Non_channeled     -1.32951 1.0998 0.054988       0.060687
## d.Macintosh.APA_MAC                   -0.43195 0.8365 0.041827       0.043762
## d.Macintosh.CEL_100                   -1.22837 1.3138 0.065691       0.070016
## d.Macintosh.Pentax_AWS_Miller          0.52852 1.0262 0.051312       0.050359
## d.Macintosh.Tosight                   -1.03877 1.3116 0.065580       0.060554
## d.Macintosh.UESCOPE_MAC               -0.46873 1.1994 0.059968       0.060018
## sd.d                                   1.23798 0.1248 0.006241       0.007114
## 
## 2. Quantiles for each variable:
## 
##                                          2.5%     25%      50%      75%  97.5%
## d.Glidescope.Airtraq                  -1.0588 -0.6834 -0.48791 -0.24695 0.2217
## d.Glidescope.APA_DAB                   0.9208  2.1476  2.79378  3.63049 4.8676
## d.Glidescope.C_MAC_D                  -1.0213 -0.4321 -0.06773  0.25942 0.9001
## d.Glidescope.C_MAC_MAC                -1.1689 -0.7670 -0.53572 -0.28298 0.2365
## d.Glidescope.Imago_V_Blade            -1.5653  0.2719  1.31293  2.49997 4.4797
## d.Glidescope.Kingvision_Channeled     -0.1724  0.3136  0.62872  0.88597 1.4165
## d.Glidescope.Kingvision_Non_channeled -1.3839 -0.7071 -0.34335  0.02719 0.7169
## d.Glidescope.Macintosh                 0.1921  0.4970  0.64873  0.79296 1.1193
## d.Glidescope.McGrath_MAC              -0.9857 -0.4530 -0.16945  0.11444 0.6291
## d.Glidescope.McGrath_Series_5         -0.6014 -0.1451  0.14643  0.39190 0.9374
## d.Glidescope.Pentax_AWS               -0.6356 -0.1544  0.15682  0.40989 0.9348
## d.Glidescope.Truview                  -1.1345 -0.3568  0.09863  0.47182 1.1174
## d.Macintosh.Airtraq_Non_channeled     -3.3230 -2.1238 -1.31988 -0.57957 0.8900
## d.Macintosh.APA_MAC                   -1.9273 -0.9949 -0.45395  0.20952 1.1754
## d.Macintosh.CEL_100                   -3.8333 -2.0489 -1.09365 -0.33293 1.3733
## d.Macintosh.Pentax_AWS_Miller         -1.3321 -0.2344  0.52106  1.27804 2.4215
## d.Macintosh.Tosight                   -3.4812 -1.9534 -1.08290 -0.15817 1.4827
## d.Macintosh.UESCOPE_MAC               -2.7890 -1.2872 -0.51898  0.44496 1.8021
## sd.d                                   1.0055  1.1541  1.23981  1.33202 1.4873
## 
## -- Model fit (residual deviance):
## 
##     Dbar       pD      DIC 
## 305.2277 234.4140 539.6416 
## 
## 320 data points, ratio 0.9538, I^2 = 0%
mcmc.first.2 <- gemtc::mtc.run(model.first, n.adapt = 5000, n.iter = 1e5, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 320
##    Unobserved stochastic nodes: 339
##    Total graph size: 6616
## 
## Initializing model
summary(mcmc.first.2)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 5010:105000
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 10000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                           Mean     SD  Naive SE Time-series SE
## d.Glidescope.Airtraq                  -0.45529 0.3255 0.0016276      0.0018183
## d.Glidescope.APA_DAB                   2.90669 1.0061 0.0050306      0.0049932
## d.Glidescope.C_MAC_D                  -0.09274 0.4752 0.0023758      0.0025856
## d.Glidescope.C_MAC_MAC                -0.52531 0.3452 0.0017262      0.0019331
## d.Glidescope.Imago_V_Blade             1.26173 1.5829 0.0079143      0.0085710
## d.Glidescope.Kingvision_Channeled      0.61265 0.4083 0.0020417      0.0021706
## d.Glidescope.Kingvision_Non_channeled -0.33880 0.5615 0.0028077      0.0029330
## d.Glidescope.Macintosh                 0.65169 0.2267 0.0011336      0.0013239
## d.Glidescope.McGrath_MAC              -0.16251 0.4153 0.0020763      0.0022376
## d.Glidescope.McGrath_Series_5          0.14062 0.4270 0.0021351      0.0022417
## d.Glidescope.Pentax_AWS                0.12091 0.4177 0.0020885      0.0021991
## d.Glidescope.Truview                   0.10864 0.5998 0.0029992      0.0033401
## d.Macintosh.Airtraq_Non_channeled     -1.31151 1.1025 0.0055124      0.0056325
## d.Macintosh.APA_MAC                   -0.46268 0.8280 0.0041398      0.0042378
## d.Macintosh.CEL_100                   -1.14158 1.3193 0.0065964      0.0066612
## d.Macintosh.Pentax_AWS_Miller          0.49671 1.1068 0.0055338      0.0055812
## d.Macintosh.Tosight                   -1.04156 1.3213 0.0066067      0.0066459
## d.Macintosh.UESCOPE_MAC               -0.47516 1.1683 0.0058415      0.0059685
## sd.d                                   1.23229 0.1214 0.0006071      0.0008048
## 
## 2. Quantiles for each variable:
## 
##                                          2.5%     25%      50%      75%  97.5%
## d.Glidescope.Airtraq                  -1.0979 -0.6723 -0.45292 -0.23601 0.1832
## d.Glidescope.APA_DAB                   0.9303  2.2374  2.90644  3.57595 4.8762
## d.Glidescope.C_MAC_D                  -1.0269 -0.4099 -0.09163  0.22221 0.8510
## d.Glidescope.C_MAC_MAC                -1.2092 -0.7547 -0.52429 -0.29257 0.1498
## d.Glidescope.Imago_V_Blade            -1.7812  0.1885  1.24024  2.29628 4.4107
## d.Glidescope.Kingvision_Channeled     -0.1899  0.3385  0.61302  0.88532 1.4145
## d.Glidescope.Kingvision_Non_channeled -1.4397 -0.7170 -0.34153  0.04158 0.7656
## d.Glidescope.Macintosh                 0.2090  0.5005  0.64984  0.80262 1.1019
## d.Glidescope.McGrath_MAC              -0.9812 -0.4418 -0.16352  0.11603 0.6531
## d.Glidescope.McGrath_Series_5         -0.7029 -0.1417  0.13987  0.42647 0.9761
## d.Glidescope.Pentax_AWS               -0.7058 -0.1579  0.12007  0.40048 0.9402
## d.Glidescope.Truview                  -1.0736 -0.2912  0.10786  0.50795 1.2943
## d.Macintosh.Airtraq_Non_channeled     -3.4824 -2.0465 -1.31374 -0.57083 0.8539
## d.Macintosh.APA_MAC                   -2.0986 -1.0119 -0.45865  0.09148 1.1699
## d.Macintosh.CEL_100                   -3.7469 -2.0182 -1.14305 -0.26203 1.4598
## d.Macintosh.Pentax_AWS_Miller         -1.6750 -0.2457  0.49201  1.23962 2.6650
## d.Macintosh.Tosight                   -3.6472 -1.9190 -1.03780 -0.14796 1.5405
## d.Macintosh.UESCOPE_MAC               -2.8174 -1.2503 -0.45987  0.30543 1.8064
## sd.d                                   1.0108  1.1482  1.22615  1.31042 1.4901
## 
## -- Model fit (residual deviance):
## 
##     Dbar       pD      DIC 
## 304.8321 233.3743 538.2063 
## 
## 320 data points, ratio 0.9526, I^2 = 0%
gelman.plot(mcmc.first.1)

gelman.plot(mcmc.first.2)

gelman.diag(mcmc.first.1)$mpsrf
## [1] 1.087883
gelman.diag(mcmc.first.2)$mpsrf
## [1] 1.001117
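#Sketch: per-parameter diagnostics and trace plots complement the multivariate
#PSRF above (mcmc.first.2$samples is a coda mcmc.list, so coda tools apply)
coda::gelman.diag(mcmc.first.2$samples,multivariate = FALSE)
plot(mcmc.first.2) #trace and density plots for each parameter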
nodesplit.first <- gemtc::mtc.nodesplit(graph.first, linearModel = "random", likelihood = "binom",link = "logit",n.adapt = 5000, n.iter = 1e5, thin = 10)
summary(nodesplit.first)
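#Sketch: the standard gemtc visual for node-splitting, showing the direct,
#indirect, and network estimate with an inconsistency p-value per comparison
plot(summary(nodesplit.first))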
forest(relative.effect(mcmc.first.2, t1 = "Macintosh"), use.description = TRUE,xlim = c(-10, 10))

rank.probability.first <- gemtc::rank.probability(mcmc.first.2, preferredDirection = -1)
plot(rank.probability.first, beside=TRUE)

sucra.first <- dmetar::sucra(rank.probability.first, lower.is.better = FALSE)
sucra.first
##                               SUCRA
## C_MAC_MAC                0.78887083
## Airtraq                  0.76210417
## Airtraq_Non_channeled    0.72764028
## Kingvision_Non_channeled 0.68627917
## CEL_100                  0.66339861
## Tosight                  0.63697917
## McGrath_MAC              0.62161389
## C_MAC_D                  0.58736806
## Glidescope               0.54150000
## Truview                  0.49870139
## UESCOPE_MAC              0.48837500
## Pentax_AWS               0.48756250
## McGrath_Series_5         0.47731944
## APA_MAC                  0.47327361
## Kingvision_Channeled     0.27749583
## Imago_V_Blade            0.27341250
## Macintosh                0.24344028
## Pentax_AWS_Miller        0.23841667
## APA_DAB                  0.02624861
meta::metaprop(event = net.first$responders[net.first$treatment=="Macintosh"],n= net.first$sampleSize[net.first$treatment=="Macintosh"], studlab = net.first$study[net.first$treatment=="Macintosh"],data = net.first[net.first$treatment=="Macintosh",],method = "GLMM",sm = "PLOGIT",comb.fixed = FALSE,comb.random = TRUE,hakn = TRUE)
##                          proportion           95%-CI
## Abdallah 2011                0.0816 [0.0227; 0.1960]
## Abdallah 2019                0.0571 [0.0070; 0.1916]
## Abdelgalel 2018              0.2750 [0.1460; 0.4389]
## Abdelgawad 2015              0.0250 [0.0006; 0.1316]
## Agrawal 2020                 0.0250 [0.0006; 0.1316]
## Ahmad 2016                   0.0000 [0.0000; 0.0377]
## Ahmad 2015                   0.0000 [0.0000; 0.1372]
## Akbar 2015                   0.1333 [0.0505; 0.2679]
## Al - Ghamdi 2016             0.2727 [0.1073; 0.5022]
## Ali 2012                     0.3600 [0.1797; 0.5748]
## Ali 2017                     0.1000 [0.0211; 0.2653]
## Altun 2018                   0.1500 [0.0571; 0.2984]
## Ander 2017                   0.1282 [0.0430; 0.2743]
## Andersen 2011                0.0800 [0.0222; 0.1923]
## Aoi 2010                     0.2222 [0.0641; 0.4764]
## Aqil 2016                    0.1750 [0.0734; 0.3278]
## Aqil 2017                    0.2143 [0.1252; 0.3287]
## Arici 2014                   0.0000 [0.0000; 0.0881]
## Arima 2014                   0.2453 [0.1376; 0.3828]
## Arslan 2017                  0.0000 [0.0000; 0.0881]
## Bakshi 2019                  0.0278 [0.0007; 0.1453]
## Barak 2007                   0.1111 [0.0546; 0.1949]
## Barman 2017                  0.0000 [0.0000; 0.1000]
## Bashir 2020                  0.1750 [0.0734; 0.3278]
## Colak 2019                   0.0889 [0.0248; 0.2122]
## El - Tahan 2018              0.1250 [0.0351; 0.2899]
## Huang 2020                   0.2000 [0.0771; 0.3857]
## Wasem 2013                   0.1333 [0.0376; 0.3072]
## Yao 2015                     0.0000 [0.0000; 0.0740]
## Bhalla 2018                  0.1333 [0.0166; 0.4046]
## Bharti 2014                  0.1579 [0.0338; 0.3958]
## Bhat 2015                    0.1400 [0.0582; 0.2674]
## Bilehjani 2009               0.0789 [0.0166; 0.2138]
## Blajic 2019                  0.0508 [0.0106; 0.1415]
## Çakir 2020                   0.0323 [0.0008; 0.1670]
## Carlino 2009                 0.4667 [0.2127; 0.7341]
## Dey 2020                     0.4273 [0.3334; 0.5252]
## Di Marco 2011                0.2037 [0.1063; 0.3353]
## Driver 2016                  0.1368 [0.0749; 0.2226]
## Erden 2010                   0.0000 [0.0000; 0.2059]
## Erturk 2015                  0.1750 [0.0734; 0.3278]
## Gao 2018                     0.3049 [0.2080; 0.4164]
## Goksu 2016                   0.4133 [0.3008; 0.5330]
## Griesdale 2012               0.6500 [0.4078; 0.8461]
## Gunes 2020                   0.0778 [0.0318; 0.1537]
## Gupta 2020                   0.0500 [0.0061; 0.1692]
## Hirabayashi 2009             0.3008 [0.2453; 0.3610]
## Hirabayashi 2010             0.2300 [0.1517; 0.3249]
## Hosalli 2017                 0.2333 [0.0993; 0.4228]
## Hsu 2012                     0.1333 [0.0376; 0.3072]
## Hu 2017                      0.0208 [0.0025; 0.0732]
## Kaur 2020                    0.1250 [0.0419; 0.2680]
## Kido 2015                    0.3600 [0.1797; 0.5748]
## Kim 2013                     0.1739 [0.0495; 0.3878]
## Kleine-Brueggeney 2017       0.5750 [0.4815; 0.6647]
## Koh 2010                     0.6000 [0.3867; 0.7887]
## Kreutziger 2019              0.1700 [0.1254; 0.2228]
## Küçükosman 2020              0.0000 [0.0000; 0.1157]
## Kunaz 2016                   0.0600 [0.0125; 0.1655]
## Laosuwan 2015                0.0909 [0.0023; 0.4128]
## Lascarrou 2017               0.2857 [0.2213; 0.3572]
## Lim 2005                     0.1333 [0.0376; 0.3072]
## Lin 2012                     0.2353 [0.1500; 0.3397]
## Liu 2014                     0.2500 [0.1269; 0.4120]
## Liu 2016                     0.0667 [0.0249; 0.1395]
## Liu 2019                     0.0994 [0.0600; 0.1526]
## Macke 2020                   0.2105 [0.1254; 0.3192]
## Maharaj 2006                 0.0333 [0.0008; 0.1722]
## Maharaj 2007                 0.0500 [0.0013; 0.2487]
## Maharaj 2008                 0.3500 [0.1539; 0.5922]
## Malik 2008                   0.1333 [0.0376; 0.3072]
## Malik1 2009                  0.3200 [0.1495; 0.5350]
## Malik2 2009                  0.0333 [0.0008; 0.1722]
## Mcelwain 2011                0.1935 [0.0745; 0.3747]
## Myunghun-Kim 2017            0.0000 [0.0000; 0.1684]
## Kulkarni 2013                0.0000 [0.0000; 0.1157]
## Inangil 2018                 0.0571 [0.0070; 0.1916]
## Ing 2017                     0.0667 [0.0017; 0.3195]
## Jafra 2018                   0.0000 [0.0000; 0.0362]
## Ndoko 2008                   0.0755 [0.0209; 0.1821]
## Nishiyama 2011               0.1143 [0.0320; 0.2674]
## Parasa 2016                  0.0000 [0.0000; 0.1157]
## Pazur 2016                   0.0000 [0.0000; 0.1323]
## Peirovifar 2014              0.4000 [0.1912; 0.6395]
## Pournajafian 2014            0.0000 [0.0000; 0.0725]
## Ranieri 2012                 0.1250 [0.0555; 0.2315]
## Reena 2019                   0.2600 [0.1463; 0.4034]
## Risse 2020                   0.0968 [0.0204; 0.2575]
## Ruetzeler 2020               0.1111 [0.0459; 0.2156]
## Sargin 2016                  0.1400 [0.0582; 0.2674]
## Shah 2016                    0.4667 [0.2834; 0.6567]
## Shukla 2017                  0.2000 [0.0905; 0.3565]
## Sulser 2016                  0.0000 [0.0000; 0.0493]
## Sun 2005                     0.0300 [0.0062; 0.0852]
## Takenaka 2011                0.1471 [0.0495; 0.3106]
## Taylor 2013                  0.4091 [0.2634; 0.5675]
## Teoh 2010                    0.0200 [0.0024; 0.0704]
## Tsan 2020                    0.0435 [0.0091; 0.1218]
## Varsha 2019                  0.0571 [0.0070; 0.1916]
## Vijayakumar 2016             0.0000 [0.0000; 0.0787]
## Yoo 2018                     0.2273 [0.0782; 0.4537]
## Yumul 2016                   0.2581 [0.1186; 0.4461]
## El-Tahan 2017                0.0000 [0.0000; 0.2316]
## Lee 2012                     0.1600 [0.0454; 0.3608]
## Paik 2020                    0.0000 [0.0000; 0.3085]
## Zhao 2014                    0.6400 [0.5209; 0.7477]
## Ferrando 2011                0.2000 [0.0771; 0.3857]
## Serocki 2013                 0.1562 [0.0528; 0.3279]
## Serocki 2010                 0.1250 [0.0419; 0.2680]
## Arora 2013                   0.0000 [0.0000; 0.0660]
## 
## Number of studies combined: k = 110
## 
##                      proportion           95%-CI
## Random effects model     0.1042 [0.0817; 0.1320]
## 
## Quantifying heterogeneity:
##  tau^2 = 1.5543; tau = 1.2467; I^2 = 90.1%; H = 3.18
## 
## Test of heterogeneity:
##       Q d.f.  p-value             Test
##  486.12  109 < 0.0001        Wald-type
##  919.51  109 < 0.0001 Likelihood-Ratio
## 
## Details on meta-analytical method:
## - Random intercept logistic regression model
## - Maximum-likelihood estimator for tau^2
## - Hartung-Knapp adjustment for random effects model
## - Logit transformation
## - Clopper-Pearson confidence interval for individual studies
## - Continuity correction of 0.5 in studies with zero cell frequencies
##   (only used to calculate individual study results)
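#Sketch: translate a network log odds ratio into an absolute risk using the
#pooled Macintosh first-attempt failure proportion above (0.1042); numbers are
#illustrative, using the posterior mean of d.Glidescope.Macintosh with its sign
#flipped to express Glidescope vs Macintosh
baseline<-0.1042
log.or<- -0.65169
odds<-baseline/(1-baseline)*exp(log.or)
odds/(1+odds) #about 0.057, i.e. roughly 5.7% vs the 10.4% Macintosh baseline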
#Funnel Plot for failed first intubation attempt
funnel.first<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Tables Funnel Plots/Funnel Network First.csv")
funnel.first_analysis<-dplyr::filter(funnel.first,funnel.first$ffirst.e1>0 | funnel.first$ffirst.e2>0)
mbin_funnel.first_random<-meta::metabin(ffirst.e1,ffirst.t1,ffirst.e2,ffirst.t2,data = funnel.first_analysis,studlab =study,comb.fixed = FALSE,comb.random = TRUE,method.tau = "SJ",hakn = TRUE,prediction = TRUE,incr = 0.5,sm = "RR")
mbin_funnel.first_random
##                             RR             95%-CI %W(random)
## Abdallah 2011           1.7150 [0.5357;   5.4904]        0.7
## Abdallah 2019           0.5000 [0.0475;   5.2653]        0.4
## Abdelgalel 2018         0.1818 [0.0430;   0.7685]        0.6
## Abdelgalel 2018         0.2727 [0.0822;   0.9046]        0.7
## Abdelgawad 2015         0.3333 [0.0140;   7.9424]        0.3
## Agrawal 2020            0.3333 [0.0140;   7.9424]        0.3
## Ahmed 2017              0.6667 [0.1198;   3.7087]        0.5
## Akbar 2015              0.1667 [0.0209;   1.3291]        0.5
## Akbas 2019              1.0000 [0.2146;   4.6605]        0.6
## Al - Ghamdi 2016        1.0476 [0.4008;   2.7380]        0.8
## Al - Ghamdi 2016        2.0952 [0.9639;   4.5544]        0.9
## Al - Ghamdi 2016        1.1667 [0.4666;   2.9170]        0.8
## Ali 2012                0.3333 [0.1021;   1.0883]        0.7
## Ali 2015                4.0000 [0.4801;  33.3254]        0.4
## Ali 2017                0.3333 [0.0367;   3.0260]        0.4
## Altun 2018              0.8333 [0.2766;   2.5108]        0.8
## Altun 2018              0.8333 [0.2766;   2.5108]        0.8
## Alvis 2016              0.0627 [0.0037;   1.0528]        0.3
## Ander 2017              0.0909 [0.0052;   1.5894]        0.3
## Andersen 2011           0.2500 [0.0289;   2.1590]        0.4
## Aoi 2010                1.0000 [0.2946;   3.3948]        0.7
## Aqil 2016               0.8571 [0.3158;   2.3264]        0.8
## Aqil 2017               0.4000 [0.1648;   0.9710]        0.8
## Arima 2014              2.1841 [1.2837;   3.7161]        0.9
## Arslan 2015             0.0833 [0.0114;   0.6111]        0.5
## Arslan 2017             5.0000 [0.2477; 100.9273]        0.3
## Arslan 2017             3.0000 [0.1259;  71.4820]        0.3
## Aziz 2016               1.7268 [1.0215;   2.9192]        1.0
## Bakshi 2019             0.9730 [0.0632;  14.9718]        0.3
## Barak 2007              0.4500 [0.1469;   1.3788]        0.8
## Bashir 2020             0.2857 [0.0632;   1.2922]        0.6
## Belze 2017              0.8750 [0.3547;   2.1588]        0.8
## Bhalla 2018             2.0000 [0.4291;   9.3210]        0.6
## Bharti 2014             0.3167 [0.0360;   2.7855]        0.4
## Bhat 2015               0.4286 [0.1174;   1.5639]        0.7
## Bhola 2014              0.1111 [0.0061;   2.0105]        0.3
## Bilehjani 2009          3.4833 [1.0524;  11.5293]        0.7
## Blajic 2019             1.0000 [0.2103;   4.7545]        0.6
## Blajic 2019             0.3278 [0.0351;   3.0619]        0.4
## Brozek 2020             2.5000 [1.0475;   5.9664]        0.8
## Bruck 2015              6.3462 [1.5460;  26.0494]        0.6
## Çakir 2020              0.3333 [0.0141;   7.8748]        0.3
## Carlino 2009            0.0667 [0.0042;   1.0690]        0.3
## Cattano 2012            1.0000 [0.2191;   4.5639]        0.6
## Cavus 2018              0.9206 [0.3823;   2.2168]        0.8
## Cavus 2018              0.2755 [0.1380;   0.5499]        0.9
## Chanchayanon 2018       7.0000 [0.3854; 127.1247]        0.3
## Colak 2019              0.2500 [0.0291;   2.1505]        0.4
## Dey 2020                0.3684 [0.2264;   0.5996]        1.0
## Di Marco 2011           0.6364 [0.2668;   1.5179]        0.8
## Driver 2016             0.5676 [0.2462;   1.3087]        0.9
## El - Tahan 2018         1.6471 [0.5322;   5.0970]        0.7
## El - Tahan 2018         1.6000 [0.5163;   4.9584]        0.7
## El - Tahan 2018         3.0000 [1.0818;   8.3196]        0.8
## El-Shmaa 2020           0.5000 [0.0472;   5.2961]        0.4
## El-Tahan 2017           4.6774 [0.2446;  89.4307]        0.3
## Erden 2010              3.0000 [0.1315;  68.4178]        0.3
## Erturk 2015             0.4286 [0.1192;   1.5407]        0.7
## Ferrando 2011           0.1667 [0.0213;   1.3020]        0.5
## Gao 2018                1.0528 [0.6679;   1.6597]        1.0
## Goksu 2016              0.6129 [0.3819;   0.9835]        1.0
## Griesdale 2012          0.9231 [0.5705;   1.4934]        1.0
## Gunes 2020              0.2857 [0.0610;   1.3382]        0.6
## Gupta 2020              0.5000 [0.0472;   5.2961]        0.4
## Hirabayashi 2009        0.1385 [0.0754;   0.2544]        0.9
## Hirabayashi 2010        0.2609 [0.1110;   0.6132]        0.8
## Hosalli 2017            0.4286 [0.1223;   1.5022]        0.7
## Hsu 2012                0.1111 [0.0062;   1.9760]        0.3
## Hu 2017                 0.1920 [0.0093;   3.9489]        0.3
## Huang 2020              3.1034 [1.4367;   6.7038]        0.9
## Huang 2020              2.3333 [1.0364;   5.2530]        0.9
## Inangil 2018            0.2000 [0.0100;   4.0192]        0.3
## Ing 2017                2.7273 [0.2815;  26.4214]        0.4
## Jeon 2011               3.0000 [0.3318;  27.1216]        0.4
## Kaur 2020               0.2000 [0.0244;   1.6362]        0.4
## Kaur 2020               0.0909 [0.0052;   1.5908]        0.3
## Kido 2015               0.1111 [0.0152;   0.8130]        0.5
## Kim 2013                0.1160 [0.0066;   2.0345]        0.3
## Kleine-Brueggeney 2016  0.3333 [0.1371;   0.8106]        0.8
## Kleine-Brueggeney 2016  1.0000 [0.5475;   1.8264]        0.9
## Kleine-Brueggeney 2016  0.1667 [0.0504;   0.5510]        0.7
## Kleine-Brueggeney 2016  4.2222 [2.6999;   6.6029]        1.0
## Kleine-Brueggeney 2016  0.8889 [0.4762;   1.6591]        0.9
## Kleine-Brueggeney 2017  0.3478 [0.2356;   0.5135]        1.0
## Kleine-Brueggeney 2017  0.9275 [0.7389;   1.1643]        1.0
## Kleine-Brueggeney 2017  0.1739 [0.0995;   0.3040]        0.9
## Koh 2010                0.0667 [0.0095;   0.4671]        0.5
## Kreutziger 2019         1.2289 [0.8567;   1.7628]        1.0
## Kulkarni 2013           3.0000 [0.1271;  70.7833]        0.3
## Kunaz 2016              1.3333 [0.3144;   5.6542]        0.6
## Lange 2009              0.5000 [0.0479;   5.2245]        0.4
## Laosuwan 2015           2.0000 [0.2107;  18.9807]        0.4
## Lascarrou 2017          1.0902 [0.7957;   1.4937]        1.0
## Lee 2012                3.7500 [1.4452;   9.7303]        0.8
## Lee 2012                4.2500 [1.6642;  10.8534]        0.8
## Lee 2012                1.0000 [0.2808;   3.5616]        0.7
## Lim 2005                0.5000 [0.0989;   2.5270]        0.6
## Lin 2012                0.4000 [0.1865;   0.8577]        0.9
## Liu 2009                0.1667 [0.0211;   1.3136]        0.5
## Liu 2014                0.1000 [0.0134;   0.7451]        0.5
## Liu 2016                1.6667 [0.6324;   4.3923]        0.8
## Liu 2019                0.3932 [0.1684;   0.9184]        0.8
## Macke 2020              0.2500 [0.0876;   0.7133]        0.8
## Maharaj 2006            0.3333 [0.0141;   7.8648]        0.3
## Maharaj 2007            0.3333 [0.0144;   7.7130]        0.3
## Maharaj 2008            0.1429 [0.0193;   1.0572]        0.5
## Malik 2008              1.5000 [0.4705;   4.7827]        0.7
## Malik 2008              0.5000 [0.0989;   2.5270]        0.6
## Malik 2008              0.7500 [0.1833;   3.0683]        0.6
## Malik1 2009             0.8750 [0.3742;   2.0462]        0.8
## Malik1 2009             0.3750 [0.1123;   1.2520]        0.7
## Malik2 2009             2.0000 [0.1914;  20.8980]        0.4
## Markham 2019            2.0000 [1.0450;   3.8278]        0.9
## Markham 2019            0.7273 [0.3100;   1.7060]        0.8
## Massen 2009             0.3478 [0.2303;   0.5253]        1.0
## Massen 2009             0.9130 [0.7890;   1.0566]        1.0
## Mcelwain 2011           0.5345 [0.1471;   1.9418]        0.7
## Mcelwain 2011           0.1782 [0.0228;   1.3917]        0.5
## Mendonca 2018           1.0408 [0.2206;   4.9113]        0.6
## Mendonca 2018           1.4412 [0.2515;   8.2579]        0.5
## Myunghun-Kim 2017       3.0000 [0.1297;  69.4167]        0.3
## Ndoko 2008              0.1111 [0.0061;   2.0134]        0.3
## Ng 2012                 2.8571 [1.2979;   6.2897]        0.9
## Nishiyama 2011          1.7014 [0.5458;   5.3037]        0.7
## Nishiyama 2011          3.6029 [1.3175;   9.8526]        0.8
## Nishiyama 2011          3.8889 [1.4422;  10.4865]        0.8
## Parasa 2016            13.0000 [0.7654; 220.8017]        0.3
## Peirovifar 2014         0.5000 [0.1789;   1.3975]        0.8
## Ranieri 2012            0.0554 [0.0033;   0.9403]        0.3
## Raza 2017               0.6000 [0.1573;   2.2889]        0.7
## Reena 2019              0.3077 [0.1077;   0.8792]        0.8
## Risse 2020              1.5196 [0.3955;   5.8387]        0.7
## Ruetzeler 2020          0.6818 [0.2282;   2.0370]        0.8
## Sahajanandan 2019       6.1935 [0.7905;  48.5253]        0.5
## Sargin 2016             0.0667 [0.0039;   1.1365]        0.3
## Serocki 2013            1.0000 [0.3203;   3.1225]        0.7
## Serocki 2013            0.4129 [0.0864;   1.9722]        0.6
## Serocki 2010            1.0000 [0.3136;   3.1884]        0.7
## Serocki 2010            0.4000 [0.0824;   1.9423]        0.6
## Shah 2016               0.2857 [0.1062;   0.7684]        0.8
## Shim 2016               1.0345 [0.3343;   3.2012]        0.7
## Shravanalakshmi 2017    7.0000 [0.3721; 131.6921]        0.3
## Shravanalakshmi 2017    5.0000 [0.2468; 101.2778]        0.3
## Shukla 2017             0.1250 [0.0164;   0.9538]        0.5
## Sulser 2016             2.9597 [0.1225;  71.4820]        0.3
## Sun 2005                2.0000 [0.5144;   7.7761]        0.7
## Takenaka 2011           0.0883 [0.0051;   1.5379]        0.3
## Taylor 2013             0.0270 [0.0017;   0.4348]        0.3
## Teoh 2009               1.5000 [0.5639;   3.9904]        0.8
## Teoh 2010               2.5000 [0.4966;  12.5852]        0.6
## Teoh 2010               3.5000 [0.7452;  16.4379]        0.6
## Teoh 2010               4.5000 [0.9972;  20.3071]        0.6
## Tsan 2020               0.3333 [0.0355;   3.1262]        0.4
## Vargas 2017             2.5000 [0.5445;  11.4786]        0.6
## Varsha 2019             0.2000 [0.0100;   4.0192]        0.3
## Wan 2016                1.5349 [0.2696;   8.7372]        0.5
## Wasem 2013              0.5000 [0.0989;   2.5270]        0.6
## Yoo 2018                0.2000 [0.0254;   1.5756]        0.5
## Yumul 2016              0.2583 [0.0596;   1.1190]        0.6
## Yumul 2016              0.2583 [0.0596;   1.1190]        0.6
## Yumul 2016              1.1625 [0.5175;   2.6115]        0.9
## Zhao 2014               0.4223 [0.2800;   0.6370]        1.0
## 
## Number of studies combined: k = 162
## 
##                          RR           95%-CI     t p-value
## Random effects model 0.7354 [0.6209; 0.8711] -3.58  0.0004
## Prediction interval         [0.1144; 4.7298]              
## 
## Quantifying heterogeneity:
##  tau^2 = 0.8808 [0.3899; 0.9161]; tau = 0.9385 [0.6244; 0.9571];
##  I^2 = 69.3% [64.0%; 73.8%]; H = 1.80 [1.67; 1.95]
## 
## Test of heterogeneity:
##       Q d.f.  p-value
##  524.36  161 < 0.0001
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
meta::funnel(mbin_funnel.first_random,ref.triangle=TRUE,contour.levels=c(0.9,0.95,0.99),col.contour=c("darkblue","blue","lightblue"))

dmetar::eggers.test(mbin_funnel.first_random)
##              Intercept ConfidenceInterval      t       p
## Egger's test    -0.369       -0.761-0.023 -1.704 0.09034

Network meta-analysis of failed intubation within 2 attempts

net.two<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Analysis Network 2 Attempts.csv")
sum(net.two$sampleSize)
## [1] 12480
table(net.two$treatment)
## 
##                  Airtraq                  APA_MAC                  C_MAC_D 
##                       29                        2                       10 
##                C_MAC_MAC                  CEL_100               Glidescope 
##                       18                        1                       46 
##            imago_v_blade     Kingvision_Channeled Kingvision_Non_channeled 
##                        1                       13                        5 
##                Macintosh              McGrath_MAC         McGrath_Series_5 
##                       91                       15                       13 
##               Pentax_AWS        Pentax_AWS_Miller                  Truview 
##                       14                        1                        9
id<-c("Airtraq","APA_MAC","C_MAC_D","C_MAC_MAC","CEL_100","Glidescope","imago_v_blade","Kingvision_Channeled","Kingvision_Non_channeled","Macintosh","McGrath_MAC","McGrath_Series_5","Pentax_AWS","Pentax_AWS_Miller","Truview")
description<-c("Airtraq","APA MAC","C-MAC D","C-MAC","CEL 100","Glidescope","Imago V-Blade","King Vision Channeled","King Vision Non-channeled","Macintosh","McGrath MAC","McGrath Series 5", "Pentax AWS","Pentax AWS Miller","Truview")
treat.codes.two<-data.frame(id,description)
graph.two<-gemtc::mtc.network(data.ab = net.two,treatments = treat.codes.two)
summary(graph.two)
## $Description
## [1] "MTC dataset: Network"
## 
## $`Studies per treatment`
##                  Airtraq                  APA_MAC                  C_MAC_D 
##                       29                        2                       10 
##                C_MAC_MAC                  CEL_100               Glidescope 
##                       18                        1                       46 
##            imago_v_blade     Kingvision_Channeled Kingvision_Non_channeled 
##                        1                       13                        5 
##                Macintosh              McGrath_MAC         McGrath_Series_5 
##                       91                       15                       13 
##               Pentax_AWS        Pentax_AWS_Miller                  Truview 
##                       14                        1                        9 
## 
## $`Number of n-arm studies`
## 2-arm 3-arm 4-arm 5-arm 
##   100    13     6     1 
## 
## $`Studies per treatment comparison`
##                          t1                       t2 nr
## 1                   Airtraq                  C_MAC_D  1
## 2                   Airtraq                C_MAC_MAC  2
## 3                   Airtraq               Glidescope  7
## 4                   Airtraq     Kingvision_Channeled  3
## 5                   Airtraq Kingvision_Non_channeled  1
## 6                   Airtraq                Macintosh 20
## 7                   Airtraq              McGrath_MAC  1
## 8                   Airtraq         McGrath_Series_5  2
## 9                   Airtraq               Pentax_AWS  2
## 10                  Airtraq        Pentax_AWS_Miller  1
## 11                  APA_MAC                C_MAC_MAC  1
## 12                  APA_MAC Kingvision_Non_channeled  1
## 13                  APA_MAC                Macintosh  1
## 14                  C_MAC_D                C_MAC_MAC  1
## 15                  C_MAC_D               Glidescope  4
## 16                  C_MAC_D     Kingvision_Channeled  3
## 17                  C_MAC_D Kingvision_Non_channeled  1
## 18                  C_MAC_D                Macintosh  5
## 19                  C_MAC_D              McGrath_MAC  1
## 20                C_MAC_MAC               Glidescope  4
## 21                C_MAC_MAC     Kingvision_Channeled  1
## 22                C_MAC_MAC Kingvision_Non_channeled  1
## 23                C_MAC_MAC                Macintosh 11
## 24                C_MAC_MAC              McGrath_MAC  2
## 25                C_MAC_MAC         McGrath_Series_5  3
## 26                C_MAC_MAC               Pentax_AWS  1
## 27                  CEL_100                Macintosh  1
## 28               Glidescope            imago_v_blade  1
## 29               Glidescope     Kingvision_Channeled  4
## 30               Glidescope Kingvision_Non_channeled  2
## 31               Glidescope                Macintosh 30
## 32               Glidescope              McGrath_MAC  4
## 33               Glidescope         McGrath_Series_5  4
## 34               Glidescope               Pentax_AWS  5
## 35               Glidescope                  Truview  1
## 36     Kingvision_Channeled Kingvision_Non_channeled  1
## 37     Kingvision_Channeled                Macintosh  7
## 38     Kingvision_Channeled              McGrath_MAC  1
## 39 Kingvision_Non_channeled                Macintosh  3
## 40                Macintosh              McGrath_MAC  9
## 41                Macintosh         McGrath_Series_5  7
## 42                Macintosh               Pentax_AWS 10
## 43                Macintosh        Pentax_AWS_Miller  1
## 44                Macintosh                  Truview  8
## 45              McGrath_MAC               Pentax_AWS  1
## 46              McGrath_MAC                  Truview  2
## 47               Pentax_AWS        Pentax_AWS_Miller  1
## 48               Pentax_AWS                  Truview  1
plot(graph.two,use.description = TRUE,vertex.shape="circle",vertex.color="white",vertex.size=15,vertex.label.color="dark blue",vertex.label.cex=1.0,edge.color="light blue")
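Before fitting the model it is worth confirming that the evidence network forms a single connected component (a minimal sketch with igraph, assuming summary() returns the comparison table printed above as a list element):

comp.two <- summary(graph.two)$`Studies per treatment comparison`
g.two <- igraph::graph_from_data_frame(comp.two[, c("t1", "t2")], directed = FALSE)
igraph::is_connected(g.two)  # NMA requires a single connected component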

model.two <- gemtc::mtc.model(graph.two,likelihood = "binom",link = "logit",linearModel = "random",n.chain = 4)
mcmc.two.1 <- gemtc::mtc.run(model.two, n.adapt = 50, n.iter = 1000, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 268
##    Unobserved stochastic nodes: 283
##    Total graph size: 5404
## 
## Initializing model
## Warning in rjags::jags.model(file.model, data = syntax[["data"]], inits =
## syntax[["inits"]], : Adaptation incomplete
## NOTE: Stopping adaptation
summary(mcmc.two.1)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 60:1050
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 100 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                           Mean      SD Naive SE Time-series SE
## d.Glidescope.Airtraq                   -0.5019  0.5969  0.02984        0.03655
## d.Glidescope.C_MAC_D                    0.1005  1.1185  0.05592        0.07463
## d.Glidescope.C_MAC_MAC                 -1.5052  0.7431  0.03715        0.05171
## d.Glidescope.imago_v_blade             -0.9984  2.3445  0.11723        0.11758
## d.Glidescope.Kingvision_Channeled      -0.3578  1.1640  0.05820        0.09374
## d.Glidescope.Kingvision_Non_channeled  -2.5546  1.9060  0.09530        0.12531
## d.Glidescope.Macintosh                  0.4408  0.4405  0.02202        0.03152
## d.Glidescope.McGrath_MAC                0.3416  0.9254  0.04627        0.05786
## d.Glidescope.McGrath_Series_5           0.9978  0.8010  0.04005        0.03887
## d.Glidescope.Pentax_AWS                -1.0200  0.8206  0.04103        0.04461
## d.Glidescope.Truview                    0.1982  1.3849  0.06925        0.10863
## d.Macintosh.APA_MAC                   -14.7431 14.3194  0.71597        1.50495
## d.Macintosh.CEL_100                    -2.2599  2.0775  0.10387        0.11037
## d.Macintosh.Pentax_AWS_Miller          -0.2378  1.5766  0.07883        0.08107
## sd.d                                    1.7119  0.2887  0.01444        0.02639
## 
## 2. Quantiles for each variable:
## 
##                                           2.5%      25%      50%      75%
## d.Glidescope.Airtraq                   -1.7840  -0.8838 -0.48004 -0.08376
## d.Glidescope.C_MAC_D                   -1.9797  -0.6115  0.05584  0.84495
## d.Glidescope.C_MAC_MAC                 -3.0207  -2.0132 -1.52108 -1.00976
## d.Glidescope.imago_v_blade             -5.2342  -2.4905 -1.02813  0.50433
## d.Glidescope.Kingvision_Channeled      -2.6474  -1.0884 -0.40149  0.39579
## d.Glidescope.Kingvision_Non_channeled  -6.9033  -3.7092 -2.35809 -1.21502
## d.Glidescope.Macintosh                 -0.3729   0.1424  0.44434  0.73979
## d.Glidescope.McGrath_MAC               -1.5338  -0.2247  0.27709  0.97657
## d.Glidescope.McGrath_Series_5          -0.4851   0.3950  0.98206  1.57639
## d.Glidescope.Pentax_AWS                -2.6912  -1.5277 -1.01236 -0.52400
## d.Glidescope.Truview                   -2.7347  -0.7200  0.27225  1.07223
## d.Macintosh.APA_MAC                   -47.4982 -16.3402 -8.76532 -5.24751
## d.Macintosh.CEL_100                    -6.8677  -3.3771 -2.06910 -0.90295
## d.Macintosh.Pentax_AWS_Miller          -3.2362  -1.2722 -0.22440  0.73541
## sd.d                                    1.2108   1.5031  1.69973  1.88793
##                                          97.5%
## d.Glidescope.Airtraq                   0.56487
## d.Glidescope.C_MAC_D                   2.33615
## d.Glidescope.C_MAC_MAC                -0.09899
## d.Glidescope.imago_v_blade             3.54839
## d.Glidescope.Kingvision_Channeled      1.94259
## d.Glidescope.Kingvision_Non_channeled  0.70799
## d.Glidescope.Macintosh                 1.33751
## d.Glidescope.McGrath_MAC               2.20359
## d.Glidescope.McGrath_Series_5          2.59506
## d.Glidescope.Pentax_AWS                0.65067
## d.Glidescope.Truview                   2.94176
## d.Macintosh.APA_MAC                   -0.87593
## d.Macintosh.CEL_100                    1.36830
## d.Macintosh.Pentax_AWS_Miller          2.95590
## sd.d                                   2.28057
## 
## -- Model fit (residual deviance):
## 
##     Dbar       pD      DIC 
## 155.6315 111.2762 266.9076 
## 
## 268 data points, ratio 0.5807, I^2 = 0%
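This deliberately short first run is unreliable: beyond the incomplete-adaptation warning, only 4 x 100 draws are retained. The effective sample size per parameter can be inspected directly (a sketch; the samples element of a gemtc result is a coda mcmc.list):

coda::effectiveSize(mcmc.two.1$samples)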
mcmc.two.2 <- gemtc::mtc.run(model.two, n.adapt = 6000, n.iter = 1e6, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 268
##    Unobserved stochastic nodes: 283
##    Total graph size: 5404
## 
## Initializing model
summary(mcmc.two.2)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 6010:1006000
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 1e+05 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                           Mean      SD  Naive SE Time-series SE
## d.Glidescope.Airtraq                   -0.4719  0.6216 0.0009828       0.001353
## d.Glidescope.C_MAC_D                    0.0184  1.0772 0.0017032       0.002257
## d.Glidescope.C_MAC_MAC                 -1.4940  0.7149 0.0011303       0.001647
## d.Glidescope.imago_v_blade             -1.0482  2.3093 0.0036513       0.003752
## d.Glidescope.Kingvision_Channeled      -0.2028  1.1184 0.0017683       0.002521
## d.Glidescope.Kingvision_Non_channeled  -2.3566  1.7300 0.0027354       0.003887
## d.Glidescope.Macintosh                  0.4470  0.4621 0.0007306       0.001076
## d.Glidescope.McGrath_MAC                0.2141  0.9349 0.0014783       0.001956
## d.Glidescope.McGrath_Series_5           0.9870  0.7560 0.0011954       0.001413
## d.Glidescope.Pentax_AWS                -0.9486  0.8616 0.0013622       0.001695
## d.Glidescope.Truview                    0.1265  1.3475 0.0021306       0.002954
## d.Macintosh.APA_MAC                   -22.7049 16.1157 0.0254812       0.403308
## d.Macintosh.CEL_100                    -2.3526  2.1845 0.0034540       0.005262
## d.Macintosh.Pentax_AWS_Miller          -0.3090  1.5998 0.0025296       0.002653
## sd.d                                    1.6810  0.3421 0.0005409       0.001379
## 
## 2. Quantiles for each variable:
## 
##                                           2.5%      25%       50%       75%
## d.Glidescope.Airtraq                   -1.7077  -0.8802  -0.46930  -0.06184
## d.Glidescope.C_MAC_D                   -2.0940  -0.6913   0.01394   0.72479
## d.Glidescope.C_MAC_MAC                 -2.9239  -1.9600  -1.48772  -1.02020
## d.Glidescope.imago_v_blade             -5.7702  -2.5194  -0.99350   0.48182
## d.Glidescope.Kingvision_Channeled      -2.4214  -0.9376  -0.20034   0.53500
## d.Glidescope.Kingvision_Non_channeled  -6.1210  -3.4017  -2.23648  -1.17700
## d.Glidescope.Macintosh                 -0.4481   0.1400   0.44105   0.74691
## d.Glidescope.McGrath_MAC               -1.6069  -0.4046   0.20590   0.82344
## d.Glidescope.McGrath_Series_5          -0.5004   0.4921   0.98517   1.47926
## d.Glidescope.Pentax_AWS                -2.7157  -1.5014  -0.92671  -0.37204
## d.Glidescope.Truview                   -2.5684  -0.7552   0.13823   1.02034
## d.Macintosh.APA_MAC                   -62.5744 -31.4659 -19.02911 -10.19803
## d.Macintosh.CEL_100                    -6.8971  -3.7251  -2.27648  -0.89883
## d.Macintosh.Pentax_AWS_Miller          -3.4975  -1.3482  -0.30135   0.73597
## sd.d                                    1.1009   1.4413   1.64941   1.88699
##                                         97.5%
## d.Glidescope.Airtraq                   0.7450
## d.Glidescope.C_MAC_D                   2.1580
## d.Glidescope.C_MAC_MAC                -0.1005
## d.Glidescope.imago_v_blade             3.3762
## d.Glidescope.Kingvision_Channeled      1.9969
## d.Glidescope.Kingvision_Non_channeled  0.7143
## d.Glidescope.Macintosh                 1.3782
## d.Glidescope.McGrath_MAC               2.0823
## d.Glidescope.McGrath_Series_5          2.4920
## d.Glidescope.Pentax_AWS                0.6907
## d.Glidescope.Truview                   2.7545
## d.Macintosh.APA_MAC                   -2.3785
## d.Macintosh.CEL_100                    1.7638
## d.Macintosh.Pentax_AWS_Miller          2.8416
## sd.d                                   2.4430
## 
## -- Model fit (residual deviance):
## 
##     Dbar       pD      DIC 
## 155.7656 110.6497 266.4153 
## 
## 268 data points, ratio 0.5812, I^2 = 0%
gelman.plot(mcmc.two.1)

gelman.plot(mcmc.two.2)

gelman.diag(mcmc.two.1)$mpsrf
## [1] 5.065269
gelman.diag(mcmc.two.2)$mpsrf
## [1] 1.00331
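The multivariate PSRF confirms this: about 5.07 for the short run versus 1.003 for the long one. A simple guard (a sketch; 1.05 is a common rule-of-thumb threshold and the re-run settings are illustrative) would extend sampling automatically:

if (gelman.diag(mcmc.two.2)$mpsrf > 1.05) {
  mcmc.two.2 <- gemtc::mtc.run(model.two, n.adapt = 10000, n.iter = 2e6, thin = 20)
}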
nodesplit.two <- gemtc::mtc.nodesplit(graph.two, linearModel = "random", likelihood = "binom",link = "logit",n.adapt = 5000, n.iter = 1e5, thin = 10)
summary(nodesplit.two)
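summary(nodesplit.two) reports the direct, indirect, and network estimates together with an inconsistency p-value for each split comparison; gemtc can also display the same information graphically:

plot(summary(nodesplit.two))  # forest-style display of direct vs indirect estimates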
forest(relative.effect(mcmc.two.2, t1 = "Macintosh"), use.description = TRUE,xlim = c(-10, 10))

rank.probability.two <- gemtc::rank.probability(mcmc.two.2, preferredDirection = -1)
plot(rank.probability.two, beside=TRUE)

sucra.two <- dmetar::sucra(rank.probability.two, lower.is.better = FALSE)
sucra.two
##                              SUCRA
## APA_MAC                  0.9857732
## Kingvision_Non_channeled 0.7785734
## C_MAC_MAC                0.7405984
## CEL_100                  0.6836421
## Pentax_AWS               0.6216082
## imago_v_blade            0.5568750
## Airtraq                  0.5169264
## Kingvision_Channeled     0.4311902
## C_MAC_D                  0.3799588
## Glidescope               0.3787646
## Pentax_AWS_Miller        0.3656171
## Truview                  0.3596366
## McGrath_MAC              0.3277634
## Macintosh                0.2356786
## McGrath_Series_5         0.1373939
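For completeness, the full league table for this outcome can be exported in the same way as done for the outcomes below (the file name here is ours):

results.two <- gemtc::relative.effect.table(mcmc.two.2)
write.csv2(results.two, file = "results_two_attempts.csv")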
meta::metaprop(event = net.two$responders[net.two$treatment=="Macintosh"],n= net.two$sampleSize[net.two$treatment=="Macintosh"], studlab = net.two$study[net.two$treatment=="Macintosh"],data = net.two[net.two$treatment=="Macintosh",],method = "GLMM",sm = "PLOGIT",comb.fixed = FALSE,comb.random = TRUE,hakn = TRUE)
##                        proportion           95%-CI
## Abdallah 2011              0.0000 [0.0000; 0.0725]
## Abdallah 2019              0.0000 [0.0000; 0.1000]
## Abdelgalel 2018            0.0750 [0.0157; 0.2039]
## Agrawal 2020               0.0000 [0.0000; 0.0881]
## Ahmad 2016                 0.0000 [0.0000; 0.0377]
## Ahmad 2015                 0.0000 [0.0000; 0.1372]
## Akbar 2015                 0.0444 [0.0054; 0.1515]
## Al - Ghamdi 2016           0.0000 [0.0000; 0.1544]
## Ali 2012                   0.1200 [0.0255; 0.3122]
## Ali 2017                   0.0333 [0.0008; 0.1722]
## Altun 2018                 0.0698 [0.0146; 0.1906]
## Andersen 2011              0.0400 [0.0049; 0.1371]
## Aoi 2010                   0.0556 [0.0014; 0.2729]
## Aqil 2016                  0.0000 [0.0000; 0.0881]
## Aqil 2017                  0.0000 [0.0000; 0.0513]
## Arici 2014                 0.0000 [0.0000; 0.0881]
## Arslan 2017                0.0000 [0.0000; 0.0881]
## Bakshi 2019                0.0000 [0.0000; 0.0974]
## Barak 2007                 0.0222 [0.0027; 0.0780]
## Barman 2017                0.0000 [0.0000; 0.1000]
## Bashir 2020                0.0000 [0.0000; 0.0881]
## Colak 2019                 0.0000 [0.0000; 0.0787]
## El - Tahan 2018            0.0000 [0.0000; 0.1089]
## Huang 2020                 0.1000 [0.0211; 0.2653]
## Wasem 2013                 0.0333 [0.0008; 0.1722]
## Yao 2015                   0.0000 [0.0000; 0.0740]
## Bhalla 2018                0.0000 [0.0000; 0.2180]
## Bharti 2014                0.0526 [0.0013; 0.2603]
## Bhat 2015                  0.0000 [0.0000; 0.0711]
## Bilehjani 2009             0.0000 [0.0000; 0.0925]
## Blajic 2019                0.0000 [0.0000; 0.0606]
## Cakir 2020                 0.0000 [0.0000; 0.1122]
## Carlino 2009               0.0000 [0.0000; 0.2180]
## Dey 2020                   0.1818 [0.1147; 0.2667]
## Erden 2010                 0.0000 [0.0000; 0.2059]
## Erturk 2015                0.0000 [0.0000; 0.0881]
## Goksu 2016                 0.1333 [0.0658; 0.2316]
## Griesdale 2012             0.1000 [0.0123; 0.3170]
## Gunes 2020                 0.0000 [0.0000; 0.0402]
## Gupta 2020                 0.0000 [0.0000; 0.0881]
## Hirabayashi 2009           0.0938 [0.0610; 0.1363]
## Hirabayashi 2010           0.1000 [0.0490; 0.1762]
## Hsu 2012                   0.0667 [0.0082; 0.2207]
## Hu 2017                    0.0000 [0.0000; 0.0377]
## Kaur 2020                  0.0000 [0.0000; 0.0881]
## Kido 2015                  0.0400 [0.0010; 0.2035]
## Kim 2013                   0.0000 [0.0000; 0.1482]
## Kleine-Brueggeney 2017     0.0000 [0.0000; 0.0303]
## Koh 2010                   0.1600 [0.0454; 0.3608]
## Kucukosman 2020            0.0000 [0.0000; 0.1157]
## Laosuwan 2015              0.0000 [0.0000; 0.2849]
## Lascarrou 2017             0.0659 [0.0345; 0.1123]
## Lim 2005                   0.0000 [0.0000; 0.1157]
## Lin 2012                   0.0732 [0.0273; 0.1525]
## Macke 2020                 0.0395 [0.0082; 0.1111]
## Maharaj 2006               0.0333 [0.0008; 0.1722]
## Maharaj 2007               0.0500 [0.0013; 0.2487]
## Maharaj 2008               0.2000 [0.0573; 0.4366]
## Malik 2008                 0.0667 [0.0082; 0.2207]
## Malik_1 2009               0.1600 [0.0454; 0.3608]
## Malik_2 2009               0.0000 [0.0000; 0.1157]
## Mcelwain 2011              0.0645 [0.0079; 0.2142]
## Myunghun-Kim 2017          0.0000 [0.0000; 0.1684]
## Kulkarni 2013              0.0000 [0.0000; 0.1157]
## Inangil 2018               0.0000 [0.0000; 0.1000]
## Ing 2017                   0.0000 [0.0000; 0.2059]
## Jafra 2018                 0.0000 [0.0000; 0.0362]
## Ndoko 2008                 0.0000 [0.0000; 0.0672]
## Nishiyama 2011             0.0571 [0.0070; 0.1916]
## Parasa 2016                0.0000 [0.0000; 0.1157]
## Pazur 2016                 0.0000 [0.0000; 0.1323]
## Peirovifar 2014            0.4000 [0.1912; 0.6395]
## Pournajafian 2014          0.0000 [0.0000; 0.0725]
## Ranieri 2012               0.0156 [0.0004; 0.0840]
## Reena 2019                 0.0000 [0.0000; 0.0711]
## Risse 2020                 0.0323 [0.0008; 0.1670]
## Ruetzeler 2020             0.0794 [0.0263; 0.1756]
## Sargin 2016                0.0000 [0.0000; 0.0711]
## Shah 2016                  0.0690 [0.0085; 0.2277]
## Shukla 2017                0.0750 [0.0157; 0.2039]
## Takenaka 2011              0.0000 [0.0000; 0.1028]
## Teoh 2010                  0.0000 [0.0000; 0.0362]
## Tsan 2020                  0.0000 [0.0000; 0.0521]
## Varsha 2019                0.0000 [0.0000; 0.1000]
## Vijayakumar 2016           0.0000 [0.0000; 0.0787]
## Yoo 2018                   0.0000 [0.0000; 0.1544]
## El-Tahan 2017              0.0000 [0.0000; 0.2316]
## Lee 2012                   0.0400 [0.0010; 0.2035]
## Ferrando 2011              0.0000 [0.0000; 0.1157]
## Serocki 2013               0.0000 [0.0000; 0.1089]
## Arora 2013                 0.0000 [0.0000; 0.0660]
## 
## Number of studies combined: k = 91
## 
##                      proportion           95%-CI
## Random effects model     0.0105 [0.0056; 0.0195]
## 
## Quantifying heterogeneity:
##  tau^2 = 3.1146; tau = 1.7648; I^2 = 84.2%; H = 2.52
## 
## Test of heterogeneity:
##       Q d.f.  p-value             Test
##   57.84   90   0.9966        Wald-type
##  318.37   90 < 0.0001 Likelihood-Ratio
## 
## Details on meta-analytical method:
## - Random intercept logistic regression model
## - Maximum-likelihood estimator for tau^2
## - Hartung-Knapp adjustment for random effects model
## - Logit transformation
## - Clopper-Pearson confidence interval for individual studies
## - Continuity correction of 0.5 in studies with zero cell frequencies
##   (only used to calculate individual study results)
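The pooled Macintosh proportion can anchor the network estimates on the absolute scale (a rough sketch using the rounded values printed above; the C-MAC vs Macintosh log-OR is the difference of the two Glidescope-anchored estimates):

p.mac <- 0.0105                   # pooled Macintosh failure proportion (above)
lor.cmac <- -1.4940 - 0.4470      # d.C_MAC_MAC minus d.Macintosh, both vs Glidescope
plogis(qlogis(p.mac) + lor.cmac)  # approx. 0.0015, i.e. ~0.15% failure with C-MAC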
#Funnel Plot for failed intubation within 2 attempts
funnel.two<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Tables Funnel Plots/Funnel Network 2 attempts.csv")
funnel.two_analysis<-dplyr::filter(funnel.two,funnel.two$ftent.e1>0 | funnel.two$ftent.e2>0)
mbin_funnel.two_random<-meta::metabin(ftent.e1,ftent.t1,ftent.e2,ftent.t2,data = funnel.two_analysis,studlab =study,comb.fixed = FALSE,comb.random = TRUE,method.tau = "SJ",hakn = TRUE,prediction = TRUE,incr = 0.5,sm = "RR")
mbin_funnel.two_random
##                        RR             95%-CI %W(random)
## Abdallah 2011      6.8614 [0.3638; 129.4185]        0.9
## Abdelgalel 2018    0.1429 [0.0076;   2.6781]        0.9
## Abdelgalel 2018    0.3333 [0.0362;   3.0701]        1.3
## Ahmed 2017         0.3333 [0.0141;   7.8648]        0.8
## Akbar 2015         0.2000 [0.0099;   4.0511]        0.9
## Akbas 2019         1.0000 [0.1480;   6.7554]        1.5
## Ali 2012           0.3333 [0.0371;   2.9910]        1.3
## Ali 2015           3.0000 [0.1281;  70.2318]        0.8
## Ali 2017           0.3333 [0.0141;   7.8648]        0.8
## Altun 2018         0.1534 [0.0082;   2.8796]        0.9
## Altun 2018         0.6825 [0.1200;   3.8806]        1.7
## Andersen 2011      0.2000 [0.0098;   4.0624]        0.9
## Aoi 2010           1.0000 [0.0676;  14.7865]        1.0
## Arici 2014         5.0000 [0.2477; 100.9273]        0.9
## Arslan 2015        0.3333 [0.0140;   7.9424]        0.8
## Barak 2007         0.5625 [0.0520;   6.0868]        1.2
## Belze 2017         2.5000 [0.5183;  12.0576]        1.8
## Bhalla 2018        7.0000 [0.3936; 124.4923]        0.9
## Bharti 2014        0.3171 [0.0137;   7.3255]        0.8
## Brozek 2020        7.0000 [0.3702; 132.3759]        0.9
## Bruck 2015         3.4528 [0.1468;  81.2229]        0.8
## Cattano 2012       2.0000 [0.1914;  20.8980]        1.2
## Cavus 2018         0.2071 [0.0102;   4.2191]        0.9
## Chanchayanon 2018  5.0000 [0.2555;  97.8510]        0.9
## Dey 2020           0.1528 [0.0468;   0.4992]        2.1
## El - Tahan 2018    4.7101 [0.2349;  94.4475]        0.9
## El - Tahan 2018   10.0704 [0.5792; 175.0884]        1.0
## El - Tahan 2018    3.0000 [0.1268;  70.9570]        0.8
## Erden 2010         3.0000 [0.1315;  68.4178]        0.8
## Goksu 2016         0.6000 [0.2297;   1.5673]        2.4
## Griesdale 2012     1.5000 [0.2800;   8.0356]        1.7
## Hirabayashi 2009   0.0404 [0.0055;   0.2965]        1.5
## Hirabayashi 2010   0.1000 [0.0130;   0.7666]        1.4
## Hsu 2012           0.2000 [0.0100;   3.9955]        0.9
## Huang 2020         2.0690 [0.5704;   7.5048]        2.0
## Huang 2020         1.0000 [0.2191;   4.5639]        1.8
## Kido 2015          0.3333 [0.0142;   7.8035]        0.8
## Koh 2010           0.2500 [0.0300;   2.0828]        1.4
## Lange 2009         0.2000 [0.0100;   3.9955]        0.9
## Lascarrou 2017     0.9117 [0.4129;   2.0127]        2.5
## Lee 2012           8.0000 [1.0788;  59.3243]        1.4
## Lee 2012          18.0000 [2.5974; 124.7401]        1.5
## Lee 2012           1.0000 [0.0662;  15.1168]        1.0
## Lin 2012           0.1647 [0.0203;   1.3378]        1.4
## Liu 2009           0.1111 [0.0062;   1.9882]        0.9
## Liu 2019           0.0481 [0.0028;   0.8155]        1.0
## Macke 2020         0.1429 [0.0075;   2.7190]        0.9
## Maharaj 2006       0.3333 [0.0141;   7.8648]        0.8
## Maharaj 2007       0.3333 [0.0144;   7.7130]        0.8
## Maharaj 2008       0.1111 [0.0064;   1.9341]        1.0
## Malik 2008         1.0000 [0.1505;   6.6426]        1.5
## Malik 2008         0.2000 [0.0100;   3.9955]        0.9
## Malik 2008         0.2000 [0.0100;   3.9955]        0.9
## Malik1 2009        0.1111 [0.0063;   1.9591]        0.9
## Malik1 2009        0.1111 [0.0063;   1.9591]        0.9
## Markham 2019       0.4000 [0.0801;   1.9977]        1.8
## Markham 2019       0.0909 [0.0051;   1.6153]        0.9
## Massen 2009        0.1316 [0.0565;   0.3066]        2.5
## Massen 2009        0.7895 [0.5998;   1.0391]        2.8
## Mcelwain 2011      0.5345 [0.0512;   5.5845]        1.2
## Mcelwain 2011      0.2136 [0.0107;   4.2661]        0.9
## Mendonca 2018      1.0408 [0.0669;  16.1827]        1.0
## Ng 2012            5.0000 [0.6006;  41.6277]        1.4
## Nishiyama 2011     0.4861 [0.0461;   5.1224]        1.2
## Nishiyama 2011     3.0882 [0.6693;  14.2496]        1.8
## Nishiyama 2011     1.9444 [0.3801;   9.9477]        1.7
## Parasa 2016        2.9048 [0.1230;  68.5796]        0.8
## Peirovifar 2014    0.5000 [0.1789;   1.3975]        2.3
## Ranieri 2012       0.3139 [0.0130;   7.5664]        0.8
## Raza 2017          0.5000 [0.0479;   5.2245]        1.2
## Risse 2020         2.7353 [0.3000;  24.9395]        1.3
## Ruetzeler 2020     0.3818 [0.0769;   1.8970]        1.8
## Serocki 2013       3.0000 [0.1268;  70.9570]        0.8
## Serocki 2013       3.0952 [0.1309;  73.1662]        0.8
## Shah 2016          0.4833 [0.0463;   5.0456]        1.2
## Shim 2016          1.0345 [0.1559;   6.8637]        1.5
## Shukla 2017        0.1429 [0.0076;   2.6781]        0.9
## Teoh 2009          0.3333 [0.0138;   8.0440]        0.8
## Teoh 2010          3.0000 [0.1237;  72.7665]        0.8
## Vargas 2017        0.5000 [0.0490;   5.1031]        1.2
## Wan 2016           2.0000 [0.1880;  21.2801]        1.2
## Wasem 2013         1.0000 [0.0655;  15.2598]        1.0
## 
## Number of studies combined: k = 82
## 
##                          RR           95%-CI     t p-value
## Random effects model 0.6829 [0.5078; 0.9184] -2.56  0.0123
## Prediction interval         [0.0873; 5.3407]              
## 
## Quantifying heterogeneity:
##  tau^2 = 1.0460 [0.0761; 1.1037]; tau = 1.0228 [0.2759; 1.0506];
##  I^2 = 32.3% [10.9%; 48.6%]; H = 1.22 [1.06; 1.39]
## 
## Test of heterogeneity:
##       Q d.f. p-value
##  119.66   81  0.0034
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
meta::funnel(mbin_funnel.two_random,ref.triangle=TRUE,contour.levels=c(0.9,0.95,0.99),col.contour=c("darkblue","blue","lightblue"))

dmetar::eggers.test(mbin_funnel.two_random)
##              Intercept ConfidenceInterval      t       p
## Egger's test    -0.066       -0.458-0.326 -0.312 0.75599

Network meta-analysis of difficult intubation

net.difficult<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Analysis Network Difficult.csv")
sum(net.difficult$sampleSize)
## [1] 4409
table(net.difficult$treatment)
## 
##                  Airtraq                  C_MAC_D                C_MAC_MAC 
##                       15                        7                        7 
##               Glidescope            imago_v_blade     Kingvision_Channeled 
##                        7                        1                        6 
## Kingvision_Non_channeled                Macintosh              McGrath_MAC 
##                        2                       36                        4 
##         McGrath_Series_5               Pentax_AWS                  Truview 
##                        5                        9                        8
id<-c("Airtraq","C_MAC_D","C_MAC_MAC","Glidescope","imago_v_blade","Kingvision_Channeled","Kingvision_Non_channeled","Macintosh","McGrath_MAC","McGrath_Series_5","Pentax_AWS","Truview")
description<-c("Airtraq","C-MAC D","C-MAC","Glidescope","Imago V-Blade","KingVision Channeled","KingVision Non-channeled","Macintosh","McGrath MAC","McGrath Series 5", "Pentax AWS","Truview")
treat.codes.difficult<-data.frame(id,description)
graph.difficult<-gemtc::mtc.network(data.ab = net.difficult,treatments = treat.codes.difficult)
summary(graph.difficult)
## $Description
## [1] "MTC dataset: Network"
## 
## $`Studies per treatment`
##                  Airtraq                  C_MAC_D                C_MAC_MAC 
##                       15                        7                        7 
##               Glidescope            imago_v_blade     Kingvision_Channeled 
##                        7                        1                        6 
## Kingvision_Non_channeled                Macintosh              McGrath_MAC 
##                        2                       36                        4 
##         McGrath_Series_5               Pentax_AWS                  Truview 
##                        5                        9                        8 
## 
## $`Number of n-arm studies`
## 2-arm 3-arm 4-arm 
##    41     7     1 
## 
## $`Studies per treatment comparison`
##                      t1                       t2 nr
## 1               Airtraq                C_MAC_MAC  2
## 2               Airtraq                Macintosh 12
## 3               Airtraq         McGrath_Series_5  1
## 4               Airtraq               Pentax_AWS  1
## 5               C_MAC_D                C_MAC_MAC  1
## 6               C_MAC_D               Glidescope  1
## 7               C_MAC_D     Kingvision_Channeled  2
## 8               C_MAC_D Kingvision_Non_channeled  1
## 9               C_MAC_D                Macintosh  3
## 10              C_MAC_D                  Truview  1
## 11            C_MAC_MAC               Glidescope  1
## 12            C_MAC_MAC Kingvision_Non_channeled  1
## 13            C_MAC_MAC                Macintosh  2
## 14            C_MAC_MAC              McGrath_MAC  1
## 15            C_MAC_MAC         McGrath_Series_5  2
## 16           Glidescope            imago_v_blade  1
## 17           Glidescope     Kingvision_Channeled  1
## 18           Glidescope Kingvision_Non_channeled  1
## 19           Glidescope                Macintosh  3
## 20           Glidescope         McGrath_Series_5  1
## 21           Glidescope               Pentax_AWS  2
## 22           Glidescope                  Truview  1
## 23 Kingvision_Channeled Kingvision_Non_channeled  1
## 24 Kingvision_Channeled                Macintosh  3
## 25            Macintosh              McGrath_MAC  2
## 26            Macintosh         McGrath_Series_5  2
## 27            Macintosh               Pentax_AWS  7
## 28            Macintosh                  Truview  8
## 29          McGrath_MAC               Pentax_AWS  1
## 30          McGrath_MAC                  Truview  1
## 31           Pentax_AWS                  Truview  1
plot(graph.difficult,use.description = TRUE,vertex.shape="circle",vertex.color="white",vertex.size=15,vertex.label.color="dark blue",vertex.label.cex=1.0,edge.color="light blue")
size.difficult <- tapply(net.difficult$sampleSize, net.difficult$treatment, sum)[as.character(treat.codes.difficult$id)]  # per-device totals, aligned with the network's treatment order
plot(graph.difficult,use.description = TRUE,vertex.shape="circle",vertex.color="white",vertex.size=scales::rescale(as.numeric(size.difficult),to=c(8,25)),vertex.label.color="dark blue",vertex.label.cex=1.0,edge.color="light blue")

model.difficult <- mtc.model(graph.difficult,likelihood = "binom",link = "logit",linearModel = "random",n.chain = 4)
mcmc.difficult.1 <- mtc.run(model.difficult, n.adapt = 50, n.iter = 10000, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 107
##    Unobserved stochastic nodes: 119
##    Total graph size: 2299
## 
## Initializing model
## Warning in rjags::jags.model(file.model, data = syntax[["data"]], inits =
## syntax[["inits"]], : Adaptation incomplete
## NOTE: Stopping adaptation
summary(mcmc.difficult.1)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 60:10050
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 1000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                         Mean     SD Naive SE Time-series SE
## d.C_MAC_MAC.Airtraq                  -0.6817 0.6734 0.010647       0.013300
## d.C_MAC_MAC.C_MAC_D                  -0.1648 0.8077 0.012770       0.015255
## d.C_MAC_MAC.Glidescope                0.4512 0.7752 0.012257       0.016586
## d.C_MAC_MAC.Kingvision_Non_channeled -0.0258 1.0345 0.016357       0.017901
## d.C_MAC_MAC.Macintosh                 1.9435 0.6107 0.009656       0.012320
## d.C_MAC_MAC.McGrath_MAC               0.6939 0.8726 0.013797       0.016053
## d.C_MAC_MAC.McGrath_Series_5          1.0988 0.7720 0.012206       0.013641
## d.Glidescope.imago_v_blade            1.5423 1.7169 0.027146       0.029966
## d.Glidescope.Kingvision_Channeled    -0.5689 0.8206 0.012976       0.013377
## d.Glidescope.Pentax_AWS              -1.2412 0.7601 0.012019       0.013037
## d.Glidescope.Truview                  1.0186 0.8037 0.012708       0.012603
## sd.d                                  1.4077 0.2149 0.003398       0.003858
## 
## 2. Quantiles for each variable:
## 
##                                         2.5%      25%      50%      75%  97.5%
## d.C_MAC_MAC.Airtraq                  -2.0094 -1.11852 -0.67612 -0.23771 0.6452
## d.C_MAC_MAC.C_MAC_D                  -1.7169 -0.70329 -0.16810  0.36421 1.4554
## d.C_MAC_MAC.Glidescope               -1.0811 -0.07564  0.45603  0.97456 1.9756
## d.C_MAC_MAC.Kingvision_Non_channeled -2.0178 -0.72020 -0.03956  0.64732 2.0089
## d.C_MAC_MAC.Macintosh                 0.7535  1.53643  1.93475  2.34326 3.1424
## d.C_MAC_MAC.McGrath_MAC              -1.0393  0.12755  0.69590  1.27230 2.4491
## d.C_MAC_MAC.McGrath_Series_5         -0.4321  0.60276  1.08016  1.60017 2.6480
## d.Glidescope.imago_v_blade           -1.7629  0.38759  1.53086  2.68546 4.8589
## d.Glidescope.Kingvision_Channeled    -2.2098 -1.10704 -0.56882 -0.02764 1.0228
## d.Glidescope.Pentax_AWS              -2.7607 -1.73498 -1.22946 -0.73647 0.2661
## d.Glidescope.Truview                 -0.5410  0.46674  1.00845  1.55213 2.5784
## sd.d                                  1.0383  1.25392  1.39002  1.54108 1.8799
## 
## -- Model fit (residual deviance):
## 
##     Dbar       pD      DIC 
## 103.7446  89.3012 193.0458 
## 
## 107 data points, ratio 0.9696, I^2 = 0%
mcmc.difficult.2 <- mtc.run(model.difficult, n.adapt = 5000, n.iter = 1e5, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 107
##    Unobserved stochastic nodes: 119
##    Total graph size: 2299
## 
## Initializing model
summary(mcmc.difficult.2)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 5010:105000
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 10000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                          Mean     SD Naive SE Time-series SE
## d.C_MAC_MAC.Airtraq                  -0.68540 0.6764 0.003382       0.004253
## d.C_MAC_MAC.C_MAC_D                  -0.16973 0.7988 0.003994       0.004664
## d.C_MAC_MAC.Glidescope                0.43888 0.7747 0.003873       0.005126
## d.C_MAC_MAC.Kingvision_Non_channeled -0.04574 1.0343 0.005171       0.005645
## d.C_MAC_MAC.Macintosh                 1.92992 0.6120 0.003060       0.004037
## d.C_MAC_MAC.McGrath_MAC               0.69709 0.8755 0.004378       0.004875
## d.C_MAC_MAC.McGrath_Series_5          1.08325 0.7827 0.003913       0.004575
## d.Glidescope.imago_v_blade            1.52613 1.7186 0.008593       0.009431
## d.Glidescope.Kingvision_Channeled    -0.58594 0.8205 0.004102       0.004386
## d.Glidescope.Pentax_AWS              -1.24556 0.7698 0.003849       0.004180
## d.Glidescope.Truview                  1.02010 0.7929 0.003964       0.004339
## sd.d                                  1.40186 0.2168 0.001084       0.001351
## 
## 2. Quantiles for each variable:
## 
##                                         2.5%      25%      50%      75%  97.5%
## d.C_MAC_MAC.Airtraq                  -2.0218 -1.13278 -0.68322 -0.23521 0.6342
## d.C_MAC_MAC.C_MAC_D                  -1.7571 -0.69769 -0.16732  0.36281 1.3837
## d.C_MAC_MAC.Glidescope               -1.0804 -0.07628  0.43571  0.94552 1.9855
## d.C_MAC_MAC.Kingvision_Non_channeled -2.0855 -0.72589 -0.04853  0.64852 1.9904
## d.C_MAC_MAC.Macintosh                 0.7346  1.52947  1.92474  2.32958 3.1393
## d.C_MAC_MAC.McGrath_MAC              -1.0348  0.12338  0.69995  1.27337 2.4228
## d.C_MAC_MAC.McGrath_Series_5         -0.4601  0.56524  1.08026  1.60290 2.6263
## d.Glidescope.imago_v_blade           -1.7960  0.37177  1.50100  2.65726 4.9510
## d.Glidescope.Kingvision_Channeled    -2.2200 -1.11691 -0.58351 -0.04718 1.0207
## d.Glidescope.Pentax_AWS              -2.7862 -1.74678 -1.23538 -0.72655 0.2381
## d.Glidescope.Truview                 -0.5557  0.49662  1.02030  1.54983 2.5647
## sd.d                                  1.0314  1.24958  1.38374  1.53364 1.8771
## 
## -- Model fit (residual deviance):
## 
##      Dbar        pD       DIC 
## 103.57911  89.08606 192.66517 
## 
## 107 data points, ratio 0.968, I^2 = 0%
gelman.plot(mcmc.difficult.1)

gelman.plot(mcmc.difficult.2)

gelman.diag(mcmc.difficult.1)$mpsrf
## [1] 1.007118
gelman.diag(mcmc.difficult.2)$mpsrf
## [1] 1.000571
anohe.difficult<-gemtc::mtc.anohe(graph.difficult,n.adapt = 5000, n.iter = 1e5, thin = 10,sampler="rjags")
summary(anohe.difficult)
nodesplit.difficult <- gemtc::mtc.nodesplit(graph.difficult, linearModel = "random", likelihood = "binom",link = "logit",n.adapt = 5000, n.iter = 1e5, thin = 10)
summary(nodesplit.difficult)
forest(gemtc::relative.effect(mcmc.difficult.2, t1 = "Macintosh"), use.description = TRUE,xlim = c(-10, 10))

rank.probability.difficult <- gemtc::rank.probability(mcmc.difficult.2, preferredDirection = -1)
plot(rank.probability.difficult, beside=TRUE)

sucra.difficult <- dmetar::sucra(rank.probability.difficult, lower.is.better = FALSE)
sucra.difficult
##                               SUCRA
## Pentax_AWS               0.86677500
## Airtraq                  0.84597500
## C_MAC_D                  0.68015909
## Kingvision_Channeled     0.67115909
## Kingvision_Non_channeled 0.63023636
## C_MAC_MAC                0.62611136
## Glidescope               0.47856136
## McGrath_MAC              0.40716818
## McGrath_Series_5         0.29582500
## imago_v_blade            0.21427273
## Truview                  0.20087955
## Macintosh                0.08287727
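A compact alternative to the rankograms is a SUCRA barplot (a sketch with ggplot2, assuming sucra.difficult keeps the devices as row names, as printed above):

sucra.df <- data.frame(device = rownames(sucra.difficult), SUCRA = sucra.difficult[, 1])
ggplot(sucra.df, aes(x = reorder(device, SUCRA), y = SUCRA)) +
  geom_col() +
  coord_flip() +
  labs(x = NULL, y = "SUCRA")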
meta::metaprop(event = net.difficult$responders[net.difficult$treatment=="Macintosh"],n= net.difficult$sampleSize[net.difficult$treatment=="Macintosh"], studlab = net.difficult$study[net.difficult$treatment=="Macintosh"],data = net.difficult[net.difficult$treatment=="Macintosh",],method = "GLMM",sm = "PLOGIT",comb.fixed = FALSE,comb.random = TRUE,hakn = TRUE)
##                        proportion           95%-CI
## Agrawal 2020               1.0000 [0.9119; 1.0000]
## Ahmad 2016                 1.0000 [0.9623; 1.0000]
## Ali 2017                   0.8000 [0.6143; 0.9229]
## Aoi 2010                   0.9412 [0.7131; 0.9985]
## Arima 2014                 0.6415 [0.4980; 0.7686]
## Barak 2007                 0.0778 [0.0318; 0.1537]
## Bhalla 2018                0.4667 [0.2127; 0.7341]
## Bhandari 2013              0.5750 [0.4089; 0.7296]
## Bharti 2014                0.7368 [0.4880; 0.9085]
## Chandrashekaraiah 2017     0.7667 [0.5772; 0.9007]
## Di Marco 2011              0.6296 [0.4874; 0.7571]
## Hamp 2015                  0.2000 [0.0573; 0.4366]
## Hosalli 2017               0.8000 [0.6143; 0.9229]
## Kaur 2020                  0.3500 [0.2063; 0.5168]
## Kim 2013                   1.0000 [0.8518; 1.0000]
## Kulkarni 2013              0.1000 [0.0211; 0.2653]
## Kumar_2 2019               0.9333 [0.7793; 0.9918]
## Kunaz 2016                 0.0600 [0.0125; 0.1655]
## Lascarrou 2017             0.0757 [0.0420; 0.1237]
## Maharaj 2006               0.5333 [0.3433; 0.7166]
## Maharaj 2007               0.7000 [0.4572; 0.8811]
## Maharaj 2008               1.0000 [0.8316; 1.0000]
## Malik 2008                 0.8667 [0.6928; 0.9624]
## Malik_1 2009               0.9600 [0.7965; 0.9990]
## Malik_2 2009               0.7333 [0.5411; 0.8772]
## Mcelwain 2011              0.8065 [0.6253; 0.9255]
## Ndoko 2008                 0.0943 [0.0313; 0.2066]
## Pappu 2020                 0.8667 [0.6928; 0.9624]
## Pazur 2016                 0.3462 [0.1721; 0.5567]
## Reena 2019                 0.0800 [0.0222; 0.1923]
## Ruetzeler 2020             0.1111 [0.0459; 0.2156]
## Takenaka 2011              0.0000 [0.0000; 0.1028]
## Tolon 2012                 0.7000 [0.4572; 0.8811]
## Vijayakumar 2016           0.7778 [0.6291; 0.8880]
## Wasem 2013                 0.5333 [0.3433; 0.7166]
## Yao 2015                   0.0833 [0.0232; 0.1998]
## 
## Number of studies combined: k = 36
## 
##                      proportion           95%-CI
## Random effects model     0.6130 [0.4169; 0.7782]
## 
## Quantifying heterogeneity:
##  tau^2 = 5.4432; tau = 2.3331; I^2 = 96.3%; H = 5.18
## 
## Test of heterogeneity:
##       Q d.f.  p-value             Test
##  384.41   35 < 0.0001        Wald-type
##  961.97   35 < 0.0001 Likelihood-Ratio
## 
## Details on meta-analytical method:
## - Random intercept logistic regression model
## - Maximum-likelihood estimator for tau^2
## - Hartung-Knapp adjustment for random effects model
## - Logit transformation
## - Clopper-Pearson confidence interval for individual studies
## - Continuity correction of 0.5 in studies with zero cell frequencies
##   (only used to calculate individual study results)
results.difficult<-gemtc::relative.effect.table(mcmc.difficult.2)
write.csv2(results.difficult,file = "results_difficult.csv")


#Funnel Plot for difficult intubation
funnel.difficult<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Tables Funnel Plots/Funnel Network Difficult.csv")
funnel.difficult_analysis<-dplyr::filter(funnel.difficult,funnel.difficult$fdif.e1>0 | funnel.difficult$fdif.e2>0)
mbin_funnel.difficult_random<-meta::metabin(fdif.e1,fdif.t1,fdif.e2,fdif.t2,data = funnel.difficult_analysis,studlab =study,comb.fixed = FALSE,comb.random = TRUE,method.tau = "SJ",hakn = TRUE,prediction = TRUE,incr = 0.5,sm = "RR")
mbin_funnel.difficult_random
##                            RR            95%-CI %W(random)
## Agrawal 2020           0.3827 [0.2588;  0.5659]        2.2
## Ahmad 2016             1.0000 [0.9774;  1.0231]        2.4
## Ahmed 2017             0.7500 [0.2960;  1.9003]        1.7
## Akbas 2019             2.1667 [0.9147;  5.1323]        1.7
## Ali 2017               0.7083 [0.4940;  1.0157]        2.3
## Aoi 2010               0.1875 [0.0667;  0.5272]        1.6
## Arima 2014             0.6235 [0.3964;  0.9807]        2.2
## Barak 2007             0.8036 [0.2655;  2.4320]        1.5
## Bhalla 2018            1.7143 [0.9434;  3.1151]        2.0
## Bhandari 2013          0.0435 [0.0062;  0.3067]        0.8
## Bharti 2014            0.6107 [0.3509;  1.0628]        2.1
## Chandrashekaraiah 2017 0.9565 [0.7140;  1.2815]        2.3
## Di Marco 2011          0.3824 [0.2282;  0.6405]        2.1
## Hamp 2015              0.2941 [0.0362;  2.3868]        0.7
## Hosalli 2017           0.3333 [0.1794;  0.6195]        2.0
## Kaur 2020              0.2857 [0.1029;  0.7932]        1.6
## Kaur 2020              0.3571 [0.1420;  0.8982]        1.7
## Kim 2013               0.1556 [0.0594;  0.4074]        1.6
## Kulkarni 2013          0.6667 [0.1198;  3.7087]        1.0
## Kumar1 2019            0.2000 [0.0250;  1.6004]        0.7
## Kumar2 2019            0.1786 [0.0798;  0.3997]        1.8
## Kunaz 2016             1.3333 [0.3144;  5.6542]        1.2
## Lascarrou 2017         0.9946 [0.4879;  2.0277]        1.9
## Lee 2017               1.0192 [0.8418;  1.2341]        2.4
## Maharaj 2006           0.3125 [0.1313;  0.7439]        1.7
## Maharaj 2007           0.0714 [0.0103;  0.4930]        0.8
## Maharaj 2008           0.2683 [0.1313;  0.5484]        1.9
## Malik 2008             0.8846 [0.6943;  1.1271]        2.3
## Malik 2008             0.5000 [0.3244;  0.7706]        2.2
## Malik 2008             0.2692 [0.1386;  0.5228]        2.0
## Malik1 2009            0.4583 [0.2924;  0.7184]        2.2
## Malik1 2009            0.3750 [0.2210;  0.6363]        2.1
## Malik2 2009            0.1818 [0.0712;  0.4643]        1.7
## Markham 2019           1.6732 [1.0865;  2.5769]        2.2
## Markham 2019           1.3014 [0.8168;  2.0735]        2.2
## Massen 2009            0.1282 [0.0551;  0.2983]        1.8
## Massen 2009            0.7436 [0.5631;  0.9819]        2.3
## Mcelwain 2011          1.0262 [0.8077;  1.3038]        2.3
## Mcelwain 2011          0.3848 [0.2178;  0.6800]        2.1
## Mendonca 2018          0.7980 [0.5479;  1.1621]        2.2
## Mendonca 2018          0.6684 [0.4040;  1.1057]        2.1
## Ndoko 2008             0.0909 [0.0052;  1.6036]        0.5
## Ng 2012                0.6462 [0.2442;  1.7099]        1.6
## Pappu 2020             0.9231 [0.7353;  1.1588]        2.3
## Pappu 2020             0.3077 [0.1672;  0.5662]        2.0
## Pazur 2016             0.8889 [0.4067;  1.9430]        1.8
## Raza 2017              0.7778 [0.3330;  1.8165]        1.8
## Reena 2019             0.5000 [0.0959;  2.6074]        1.0
## Ruetzeler 2020         0.5455 [0.1678;  1.7733]        1.4
## Shravanalakshmi 2017   1.4000 [0.6963;  2.8148]        1.9
## Shravanalakshmi 2017   2.1000 [1.1189;  3.9412]        2.0
## Tolon 2012             0.2857 [0.1136;  0.7186]        1.7
## Vargas 2017            3.0000 [0.6818; 13.1999]        1.1
## Vijayakumar 2016       0.2000 [0.0995;  0.4021]        1.9
## Wasem 2013             0.5000 [0.2530;  0.9882]        1.9
## Yao 2015               1.2500 [0.3573;  4.3736]        1.3
## 
## Number of studies combined: k = 56
## 
##                          RR           95%-CI     t  p-value
## Random effects model 0.5606 [0.4547; 0.6912] -5.54 < 0.0001
## Prediction interval         [0.1320; 2.3806]               
## 
## Quantifying heterogeneity:
##  tau^2 = 0.5094 [0.2463; 0.7749]; tau = 0.7137 [0.4962; 0.8803];
##  I^2 = 83.0% [78.6%; 86.5%]; H = 2.42 [2.16; 2.72]
## 
## Test of heterogeneity:
##       Q d.f.  p-value
##  323.41   55 < 0.0001
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
meta::funnel(mbin_funnel.difficult_random,ref.triangle=TRUE,contour.levels=c(0.9,0.95,0.99),col.contour=c("darkblue","blue","lightblue"))

dmetar::eggers.test(mbin_funnel.difficult_random)
##              Intercept ConfidenceInterval      t p
## Egger's test    -1.664      -2.252--1.076 -6.072 0
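Unlike the other outcomes, Egger's test is clearly significant here, so a trim-and-fill sensitivity analysis is a reasonable next step (a sketch):

tf.difficult <- meta::trimfill(mbin_funnel.difficult_random)
tf.difficult
meta::funnel(tf.difficult)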

Network meta-analysis of percentage of glottic opening (POGO)

net.pogo<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Analysis Network POGO.csv")
sum(net.pogo$sampleSize)
## [1] 2115
table(net.pogo$treatment)
## 
##                  Airtraq                  C_MAC_D                C_MAC_MAC 
##                        6                        2                        4 
##               Glidescope     Kingvision_Channeled Kingvision_Non_channeled 
##                        7                        1                        1 
##                Macintosh              McGrath_MAC         McGrath_Series_5 
##                       18                        1                        4 
##                  Truview 
##                        1
id<-c("Airtraq","C_MAC_D","C_MAC_MAC","Glidescope","Kingvision_Channeled","Kingvision_Non_channeled","Macintosh","McGrath_MAC","McGrath_Series_5","Truview")
description<-c("Airtraq","C-MAC D","C-MAC","Glidescope","King Vision Channeled","King Vision Non-channeled","Macintosh","McGrath MAC","McGrath Series 5","Truview")
treat.codes.pogo<-data.frame(id,description)
graph.pogo<-gemtc::mtc.network(data.ab = net.pogo,treatments = treat.codes.pogo)
summary(graph.pogo)
## $Description
## [1] "MTC dataset: Network"
## 
## $`Studies per treatment`
##                  Airtraq                  C_MAC_D                C_MAC_MAC 
##                        6                        2                        4 
##               Glidescope     Kingvision_Channeled Kingvision_Non_channeled 
##                        7                        1                        1 
##                Macintosh              McGrath_MAC         McGrath_Series_5 
##                       18                        1                        4 
##                  Truview 
##                        1 
## 
## $`Number of n-arm studies`
## 2-arm 3-arm 4-arm 
##    19     1     1 
## 
## $`Studies per treatment comparison`
##            t1                       t2 nr
## 1     Airtraq                C_MAC_MAC  1
## 2     Airtraq     Kingvision_Channeled  1
## 3     Airtraq                Macintosh  4
## 4     C_MAC_D                C_MAC_MAC  1
## 5     C_MAC_D Kingvision_Non_channeled  1
## 6     C_MAC_D                Macintosh  1
## 7   C_MAC_MAC               Glidescope  1
## 8   C_MAC_MAC Kingvision_Non_channeled  1
## 9   C_MAC_MAC                Macintosh  2
## 10  C_MAC_MAC         McGrath_Series_5  1
## 11 Glidescope                Macintosh  7
## 12 Glidescope         McGrath_Series_5  1
## 13  Macintosh              McGrath_MAC  1
## 14  Macintosh         McGrath_Series_5  4
## 15  Macintosh                  Truview  1
plot(graph.pogo,use.description = TRUE,vertex.shape="circle",vertex.color="white",vertex.size=15,vertex.label.color="dark blue",vertex.label.cex=1.0,edge.color="light blue")

model.pogo <- gemtc::mtc.model(graph.pogo,likelihood = "normal",link = "identity",linearModel = "random",n.chain = 4)
mcmc.pogo.1 <- gemtc::mtc.run(model.pogo, n.adapt = 50, n.iter = 1000, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 45
##    Unobserved stochastic nodes: 55
##    Total graph size: 686
## 
## Initializing model
## Warning in rjags::jags.model(file.model, data = syntax[["data"]], inits =
## syntax[["inits"]], : Adaptation incomplete
## NOTE: Stopping adaptation
summary(mcmc.pogo.1)
## 
## Results on the Mean Difference scale
## 
## Iterations = 60:1050
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 100 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                         Mean     SD Naive SE Time-series SE
## d.Airtraq.C_MAC_MAC                   -6.866 11.735   0.5867         0.6654
## d.Airtraq.Kingvision_Channeled         7.581 18.132   0.9066         0.9026
## d.Airtraq.Macintosh                  -30.783  8.036   0.4018         0.4028
## d.C_MAC_MAC.C_MAC_D                   -6.393 14.434   0.7217         0.6668
## d.C_MAC_MAC.Glidescope                -8.828 11.599   0.5800         0.5834
## d.C_MAC_MAC.Kingvision_Non_channeled   3.214 17.780   0.8890         0.9797
## d.C_MAC_MAC.McGrath_Series_5          -2.478 12.390   0.6195         0.6572
## d.Macintosh.McGrath_MAC               16.985 19.699   0.9850         0.9348
## d.Macintosh.Truview                   19.933 19.365   0.9683         1.0447
## sd.d                                  17.766  3.862   0.1931         0.2178
## 
## 2. Quantiles for each variable:
## 
##                                        2.5%     25%     50%      75%  97.5%
## d.Airtraq.C_MAC_MAC                  -29.74 -14.656  -5.937   0.6531  14.50
## d.Airtraq.Kingvision_Channeled       -26.62  -3.033   7.165  19.3072  39.81
## d.Airtraq.Macintosh                  -46.36 -36.101 -31.331 -25.4420 -13.51
## d.C_MAC_MAC.C_MAC_D                  -32.77 -15.783  -6.487   3.4696  20.48
## d.C_MAC_MAC.Glidescope               -31.02 -16.507  -8.861  -1.8347  14.62
## d.C_MAC_MAC.Kingvision_Non_channeled -29.37  -9.035   3.357  14.3880  40.71
## d.C_MAC_MAC.McGrath_Series_5         -27.03 -10.041  -2.120   4.7556  22.69
## d.Macintosh.McGrath_MAC              -20.83   5.773  15.784  29.7629  55.47
## d.Macintosh.Truview                  -19.58   7.603  19.978  32.1881  59.17
## sd.d                                  11.75  15.099  17.190  19.9708  26.96
## 
## -- Model fit (residual deviance):
## 
##     Dbar       pD      DIC 
## 45.47545 44.36822 89.84367 
## 
## 45 data points, ratio 1.011, I^2 = 3%
mcmc.pogo.2 <- gemtc::mtc.run(model.pogo, n.adapt = 6000, n.iter = 1e6, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 45
##    Unobserved stochastic nodes: 55
##    Total graph size: 686
## 
## Initializing model
summary(mcmc.pogo.2)
## 
## Results on the Mean Difference scale
## 
## Iterations = 6010:1006000
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 1e+05 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                         Mean     SD Naive SE Time-series SE
## d.Airtraq.C_MAC_MAC                   -7.537 11.506 0.018192       0.019419
## d.Airtraq.Kingvision_Channeled         4.962 18.349 0.029013       0.028951
## d.Airtraq.Macintosh                  -30.771  8.573 0.013555       0.013657
## d.C_MAC_MAC.C_MAC_D                   -5.764 14.266 0.022556       0.022656
## d.C_MAC_MAC.Glidescope                -8.206 11.436 0.018082       0.018555
## d.C_MAC_MAC.Kingvision_Non_channeled   3.419 17.729 0.028031       0.028085
## d.C_MAC_MAC.McGrath_Series_5          -2.291 12.396 0.019600       0.020165
## d.Macintosh.McGrath_MAC               16.985 19.017 0.030069       0.030149
## d.Macintosh.Truview                   20.253 20.018 0.031651       0.043313
## sd.d                                  17.923  3.952 0.006249       0.006572
## 
## 2. Quantiles for each variable:
## 
##                                        2.5%     25%     50%      75%  97.5%
## d.Airtraq.C_MAC_MAC                  -30.45 -14.896  -7.518  -0.1586  15.22
## d.Airtraq.Kingvision_Channeled       -31.56  -6.708   4.948  16.6371  41.46
## d.Airtraq.Macintosh                  -48.00 -36.203 -30.728 -25.2643 -13.83
## d.C_MAC_MAC.C_MAC_D                  -34.14 -14.895  -5.755   3.4109  22.48
## d.C_MAC_MAC.Glidescope               -30.98 -15.521  -8.196  -0.8686  14.49
## d.C_MAC_MAC.Kingvision_Non_channeled -31.69  -7.890   3.427  14.7821  38.61
## d.C_MAC_MAC.McGrath_Series_5         -26.84 -10.238  -2.304   5.6337  22.36
## d.Macintosh.McGrath_MAC              -20.82   4.826  16.983  29.1904  54.69
## d.Macintosh.Truview                  -19.31   7.290  20.246  33.1922  59.86
## sd.d                                  11.93  15.143  17.352  20.0481  27.28
## 
## -- Model fit (residual deviance):
## 
##     Dbar       pD      DIC 
## 45.09893 44.02479 89.12371 
## 
## 45 data points, ratio 1.002, I^2 = 2%
gelman.plot(mcmc.pogo.1)

gelman.plot(mcmc.pogo.2)

gelman.diag(mcmc.pogo.1)$mpsrf
## [1] 1.028809
gelman.diag(mcmc.pogo.2)$mpsrf
## [1] 1.00006
anohe.pogo<-gemtc::mtc.anohe(graph.pogo,n.adapt = 5000, n.iter = 1e5, thin = 10,sampler="rjags")
summary(anohe.pogo)
nodesplit.pogo <- gemtc::mtc.nodesplit(graph.pogo, linearModel = "random", likelihood = "normal",link = "identity",n.adapt = 5000, n.iter = 1e5, thin = 10)
summary(nodesplit.pogo)
forest(relative.effect(mcmc.pogo.2, t1 = "Macintosh"), use.description = TRUE,xlim = c(-60, 60))

rank.probability.pogo <- gemtc::rank.probability(mcmc.pogo.2, preferredDirection = 1)
plot(rank.probability.pogo, beside=TRUE)

sucra.pogo <- dmetar::sucra(rank.probability.pogo, lower.is.better = FALSE)
sucra.pogo
##                               SUCRA
## Kingvision_Channeled     0.75644639
## Airtraq                  0.74536056
## Kingvision_Non_channeled 0.61979167
## C_MAC_MAC                0.56869611
## McGrath_Series_5         0.51447417
## Truview                  0.49754694
## McGrath_MAC              0.43818417
## C_MAC_D                  0.43017250
## Glidescope               0.36421361
## Macintosh                0.06511389
meta::metamean(n = net.pogo$sampleSize[net.pogo$treatment=="Macintosh"],mean= net.pogo$mean[net.pogo$treatment=="Macintosh"], sd=net.pogo$std.dev[net.pogo$treatment=="Macintosh"],studlab = net.pogo$study[net.pogo$treatment=="Macintosh"],data = net.pogo[net.pogo$treatment=="Macintosh",],method.tau = "SJ",sm = "MRAW",comb.fixed = FALSE,comb.random = TRUE,hakn = TRUE)
##                    mean             95%-CI %W(random)
## Abdallah 2019   90.6000 [89.7751; 91.4249]        5.9
## Akbarzadeh 2017 69.0000 [58.9161; 79.0839]        5.4
## Ali 2012        65.0000 [64.2160; 65.7840]        5.9
## Aqil 2016       57.2500 [48.1824; 66.3176]        5.5
## Aqil 2017       43.4000 [36.7939; 50.0061]        5.7
## Arici 2014      84.3700 [79.0708; 89.6692]        5.7
## Choi 2011       67.6000 [58.7614; 76.4386]        5.5
## Dey 2020        54.0000 [49.7019; 58.2981]        5.8
## Dhonneur 2008   75.0000 [69.0986; 80.9014]        5.7
## Khan 2008       77.7000 [62.4479; 92.9521]        4.8
## Kido 2015       71.4000 [63.4033; 79.3967]        5.6
## Koh 2010        15.0000 [ 8.3361; 21.6639]        5.7
## Jafra 2018      75.8500 [70.5642; 81.1358]        5.7
## Ruetzeler 2020  65.8700 [57.4792; 74.2608]        5.5
## Sargin 2016     60.8000 [50.9629; 70.6371]        5.4
## Shah 2016       68.1000 [59.4081; 76.7919]        5.5
## Tsan 2020       80.1400 [74.9420; 85.3380]        5.8
## Yumul 2016      57.0000 [42.5672; 71.4328]        4.9
## 
## Number of studies combined: k = 18
## 
##                         mean             95%-CI
## Random effects model 65.4966 [56.9366; 74.0565]
## 
## Quantifying heterogeneity:
##  tau^2 = 279.0377 [152.5381; 641.4074]; tau = 16.7044 [12.3506; 25.3260];
##  I^2 = 99.3% [99.2%; 99.4%]; H = 12.21 [11.33; 13.16]
## 
## Test of heterogeneity:
##        Q d.f. p-value
##  2535.33   17       0
## 
## Details on meta-analytical method:
## - Inverse variance method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Untransformed (raw) means
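#Export the league table of all pairwise relative effects for POGO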
results.pogo<-gemtc::relative.effect.table(mcmc.pogo.2)
write.csv2(results.pogo,file = "results_pogo.csv")

#Funnel Plot for POGO
funnel.pogo<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Tables Funnel Plots/Funnel Network POGO.csv")
mcont_funnel.pogo_random<-meta::metacont(fpogo.1,mpogo.1,sdpogo.1,fpogo.2,mpogo.2,sdpogo.2,data=funnel.pogo,studlab = study,comb.fixed = FALSE,comb.random = TRUE,prediction = TRUE,sm="SMD")
mcont_funnel.pogo_random
##                          SMD             95%-CI %W(random)
## Abdallah 2019         2.5911 [ 1.9471;  3.2350]        4.1
## Ahmed 2017           -0.2413 [-0.7493;  0.2667]        4.3
## Akbarzadeh 2017      -0.2385 [-0.7156;  0.2387]        4.3
## Ali 2012             12.3543 [ 9.7713; 14.9373]        1.4
## Ali 2015             -1.6880 [-2.3408; -1.0353]        4.1
## Aqil 2016             1.1848 [ 0.7079;  1.6618]        4.3
## Aqil 2017             1.1739 [ 0.8144;  1.5333]        4.5
## Arici 2014            0.7374 [ 0.2837;  1.1910]        4.3
## Choi 2011             0.9662 [ 0.4295;  1.5030]        4.2
## Dey 2020              1.4882 [ 1.1876;  1.7888]        4.5
## Dhonneur 2008         0.9683 [ 0.6835;  1.2532]        4.5
## Jafra 2018           -0.3196 [-0.5986; -0.0406]        4.5
## Khan 2008             0.7597 [ 0.1458;  1.3736]        4.1
## Kido 2015             0.9630 [ 0.3748;  1.5512]        4.2
## Koh 2010              3.6379 [ 2.7110;  4.5649]        3.6
## Ruetzeler 2020        0.6690 [ 0.3140;  1.0241]        4.5
## Sargin 2016           0.8283 [ 0.4192;  1.2374]        4.4
## Shah 2016             0.8846 [ 0.3527;  1.4165]        4.2
## Shravanalakshmi 2017  0.1209 [-0.2927;  0.5345]        4.4
## Shravanalakshmi 2017 -0.4429 [-0.8614; -0.0245]        4.4
## Tsan 2020             0.3062 [-0.0295;  0.6419]        4.5
## Yumul 2016            0.8221 [ 0.2979;  1.3462]        4.3
## Yumul 2016            0.9458 [ 0.4147;  1.4769]        4.2
## Yumul 2016            1.1101 [ 0.5684;  1.6518]        4.2
## 
## Number of studies combined: k = 24
## 
##                         SMD            95%-CI    z  p-value
## Random effects model 0.8917 [ 0.5230; 1.2604] 4.74 < 0.0001
## Prediction interval         [-0.9589; 2.7423]              
## 
## Quantifying heterogeneity:
##  tau^2 = 0.7609 [1.0311; 3.6917]; tau = 0.8723 [1.0154; 1.9214];
##  I^2 = 93.7% [91.8%; 95.2%]; H = 4.00 [3.50; 4.56]
## 
## Test of heterogeneity:
##       Q d.f.  p-value
##  367.23   23 < 0.0001
## 
## Details on meta-analytical method:
## - Inverse variance method
## - DerSimonian-Laird estimator for tau^2
## - Jackson method for confidence interval of tau^2 and tau
## - Hedges' g (bias corrected standardised mean difference)
meta::funnel(mcont_funnel.pogo_random,ref.triangle=TRUE,contour.levels=c(0.9,0.95,0.99),col.contour=c("darkblue","blue","lightblue"))

dmetar::eggers.test(mcont_funnel.pogo_random)
##              Intercept        ConfidenceInterval     t       p
## Egger's test     4.654              -0.050-9.358 1.942 0.06509
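
The intercept's confidence interval crosses zero (p = 0.065), so Egger's test provides no formal evidence of funnel plot asymmetry for POGO at the 5% level, although the result is borderline.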

Network meta-analysis of difficult laryngoscopy by Cormack and Lehane's classification

net.cormack<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Analysis Network Cormack.csv")
sum(net.cormack$sampleSize)
## [1] 13749
table(net.cormack$treatment)
## 
##                  Airtraq    Airtraq_Non_channeled                  APA_DAB 
##                       26                        1                        1 
##                  APA_MAC                  C_MAC_D                C_MAC_MAC 
##                        2                       10                       20 
##             C_MAC_Miller                  CEL_100               Glidescope 
##                        1                        1                       39 
##            imago_v_blade     Kingvision_Channeled Kingvision_Non_channeled 
##                        1                       12                        5 
##                Macintosh              McGrath_MAC         McGrath_Series_5 
##                       90                       18                       13 
##               Pentax_AWS        Pentax_AWS_Miller                  Truview 
##                       14                        1                        9 
##              UESCOPE_MAC 
##                        2
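#Map treatment ids to the display descriptions used in the network and forest plots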
id<-c("Airtraq","Airtraq_Non_channeled","APA_DAB","APA_MAC","C_MAC_D","C_MAC_MAC","C_MAC_Miller","CEL_100","Glidescope","imago_v_blade","Kingvision_Channeled","Kingvision_Non_channeled","Macintosh","McGrath_MAC","McGrath_Series_5","Pentax_AWS","Pentax_AWS_Miller","Truview","UESCOPE_MAC")
description<-c("Airtraq","Airtraq Non-channeled","APA DAB","APA MAC","C-MAC D","C-MAC","C-MAC Miller","CEL 100","Glidescope","Imago V-Blade","King Vision Channeled","King Vision Non-channeled","Macintosh","McGrath MAC","McGrath Series 5","Pentax AWS","Pentax AWS Miller","Truview","UESCOPE MAC")
treat.codes.cormack<-data.frame(id,description)
graph.cormack<-gemtc::mtc.network(data.ab = net.cormack,treatments = treat.codes.cormack)
summary(graph.cormack)
## $Description
## [1] "MTC dataset: Network"
## 
## $`Studies per treatment`
##                  Airtraq    Airtraq_Non_channeled                  APA_DAB 
##                       26                        1                        1 
##                  APA_MAC                  C_MAC_D                C_MAC_MAC 
##                        2                       10                       20 
##             C_MAC_Miller                  CEL_100               Glidescope 
##                        1                        1                       39 
##            imago_v_blade     Kingvision_Channeled Kingvision_Non_channeled 
##                        1                       12                        5 
##                Macintosh              McGrath_MAC         McGrath_Series_5 
##                       90                       18                       13 
##               Pentax_AWS        Pentax_AWS_Miller                  Truview 
##                       14                        1                        9 
##              UESCOPE_MAC 
##                        2 
## 
## $`Number of n-arm studies`
## 2-arm 3-arm 4-arm 6-arm 
##    95    14     7     1 
## 
## $`Studies per treatment comparison`
##                          t1                       t2 nr
## 1                   Airtraq                  APA_DAB  1
## 2                   Airtraq                  C_MAC_D  1
## 3                   Airtraq                C_MAC_MAC  1
## 4                   Airtraq               Glidescope  7
## 5                   Airtraq     Kingvision_Channeled  2
## 6                   Airtraq Kingvision_Non_channeled  1
## 7                   Airtraq                Macintosh 20
## 8                   Airtraq              McGrath_MAC  1
## 9                   Airtraq         McGrath_Series_5  1
## 10                  Airtraq               Pentax_AWS  2
## 11                  Airtraq        Pentax_AWS_Miller  1
## 12                  Airtraq                  Truview  1
## 13    Airtraq_Non_channeled                  APA_MAC  1
## 14    Airtraq_Non_channeled Kingvision_Non_channeled  1
## 15    Airtraq_Non_channeled                Macintosh  1
## 16                  APA_DAB                  C_MAC_D  1
## 17                  APA_DAB               Glidescope  1
## 18                  APA_DAB     Kingvision_Channeled  1
## 19                  APA_DAB              McGrath_MAC  1
## 20                  APA_MAC                C_MAC_MAC  1
## 21                  APA_MAC     Kingvision_Channeled  1
## 22                  APA_MAC Kingvision_Non_channeled  1
## 23                  APA_MAC                Macintosh  1
## 24                  C_MAC_D                C_MAC_MAC  1
## 25                  C_MAC_D               Glidescope  4
## 26                  C_MAC_D     Kingvision_Channeled  2
## 27                  C_MAC_D Kingvision_Non_channeled  1
## 28                  C_MAC_D                Macintosh  5
## 29                  C_MAC_D              McGrath_MAC  1
## 30                C_MAC_MAC             C_MAC_Miller  1
## 31                C_MAC_MAC               Glidescope  4
## 32                C_MAC_MAC     Kingvision_Channeled  2
## 33                C_MAC_MAC Kingvision_Non_channeled  1
## 34                C_MAC_MAC                Macintosh 14
## 35                C_MAC_MAC              McGrath_MAC  2
## 36                C_MAC_MAC         McGrath_Series_5  3
## 37                C_MAC_MAC               Pentax_AWS  1
## 38             C_MAC_Miller                Macintosh  1
## 39                  CEL_100                Macintosh  1
## 40               Glidescope            imago_v_blade  1
## 41               Glidescope     Kingvision_Channeled  4
## 42               Glidescope Kingvision_Non_channeled  2
## 43               Glidescope                Macintosh 22
## 44               Glidescope              McGrath_MAC  4
## 45               Glidescope         McGrath_Series_5  4
## 46               Glidescope               Pentax_AWS  5
## 47               Glidescope                  Truview  1
## 48     Kingvision_Channeled Kingvision_Non_channeled  1
## 49     Kingvision_Channeled                Macintosh  6
## 50     Kingvision_Channeled              McGrath_MAC  2
## 51 Kingvision_Non_channeled                Macintosh  3
## 52                Macintosh              McGrath_MAC 11
## 53                Macintosh         McGrath_Series_5  8
## 54                Macintosh               Pentax_AWS 10
## 55                Macintosh        Pentax_AWS_Miller  1
## 56                Macintosh                  Truview  8
## 57                Macintosh              UESCOPE_MAC  2
## 58              McGrath_MAC               Pentax_AWS  1
## 59              McGrath_MAC                  Truview  2
## 60               Pentax_AWS        Pentax_AWS_Miller  1
## 61               Pentax_AWS                  Truview  1
plot(graph.cormack,use.description = TRUE,vertex.shape="circle",vertex.color="white",vertex.size=15,vertex.label.color="dark blue",vertex.label.cex=1.0,edge.color="light blue")

model.cormack <- gemtc::mtc.model(graph.cormack,likelihood = "binom",link = "logit",linearModel = "random",n.chain = 4)
mcmc.cormack.1 <- gemtc::mtc.run(model.cormack, n.adapt = 50, n.iter = 1000, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 266
##    Unobserved stochastic nodes: 285
##    Total graph size: 5582
## 
## Initializing model
## Warning in rjags::jags.model(file.model, data = syntax[["data"]], inits =
## syntax[["inits"]], : Adaptation incomplete
## NOTE: Stopping adaptation
summary(mcmc.cormack.1)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 60:1050
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 100 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                           Mean     SD Naive SE Time-series SE
## d.Glidescope.Airtraq                  -0.19269 0.5741  0.02871        0.04347
## d.Glidescope.APA_DAB                   3.13640 1.4291  0.07145        0.07667
## d.Glidescope.C_MAC_D                  -1.88956 1.0115  0.05058        0.08030
## d.Glidescope.C_MAC_MAC                 1.68015 0.6163  0.03082        0.04324
## d.Glidescope.imago_v_blade             0.17576 1.8059  0.09030        0.09092
## d.Glidescope.Kingvision_Channeled      0.87910 0.6943  0.03472        0.04015
## d.Glidescope.Kingvision_Non_channeled -1.16707 1.1205  0.05603        0.07353
## d.Glidescope.Macintosh                 2.84298 0.4771  0.02385        0.03965
## d.Glidescope.McGrath_MAC               0.79077 0.7338  0.03669        0.03800
## d.Glidescope.McGrath_Series_5          0.48085 0.8546  0.04273        0.05193
## d.Glidescope.Pentax_AWS               -0.61827 0.9384  0.04692        0.05977
## d.Glidescope.Truview                  -1.01112 1.0749  0.05375        0.06249
## d.Macintosh.Airtraq_Non_channeled     -3.57561 1.6818  0.08409        0.09897
## d.Macintosh.APA_MAC                   -0.46338 1.1796  0.05898        0.05895
## d.Macintosh.C_MAC_Miller              -0.67753 1.6257  0.08128        0.08108
## d.Macintosh.CEL_100                   -1.95646 2.0309  0.10155        0.12499
## d.Macintosh.Pentax_AWS_Miller         -1.53436 1.5608  0.07804        0.07477
## d.Macintosh.UESCOPE_MAC               -0.09603 1.6966  0.08483        0.07836
## sd.d                                   1.66526 0.2915  0.01457        0.02795
## 
## 2. Quantiles for each variable:
## 
##                                          2.5%      25%      50%      75%
## d.Glidescope.Airtraq                  -1.1597 -0.61216 -0.18057  0.19609
## d.Glidescope.APA_DAB                   0.6507  2.15214  3.10346  3.98905
## d.Glidescope.C_MAC_D                  -3.7400 -2.61278 -1.92185 -1.13669
## d.Glidescope.C_MAC_MAC                 0.6088  1.21361  1.66759  2.05832
## d.Glidescope.imago_v_blade            -3.4840 -0.99761  0.29160  1.42771
## d.Glidescope.Kingvision_Channeled     -0.5297  0.44259  0.83520  1.31106
## d.Glidescope.Kingvision_Non_channeled -3.3720 -1.90744 -1.16203 -0.38973
## d.Glidescope.Macintosh                 1.9656  2.53778  2.81622  3.12819
## d.Glidescope.McGrath_MAC              -0.5791  0.30234  0.79473  1.26308
## d.Glidescope.McGrath_Series_5         -1.1883 -0.05289  0.41530  1.03779
## d.Glidescope.Pentax_AWS               -2.5011 -1.21288 -0.57518 -0.01033
## d.Glidescope.Truview                  -2.9955 -1.75842 -0.99502 -0.28744
## d.Macintosh.Airtraq_Non_channeled     -7.0335 -4.61991 -3.50616 -2.46254
## d.Macintosh.APA_MAC                   -2.7718 -1.20939 -0.40423  0.32977
## d.Macintosh.C_MAC_Miller              -3.7034 -1.70273 -0.57749  0.37376
## d.Macintosh.CEL_100                   -6.0893 -3.19638 -1.87366 -0.42839
## d.Macintosh.Pentax_AWS_Miller         -4.4176 -2.63219 -1.47359 -0.55866
## d.Macintosh.UESCOPE_MAC               -3.8492 -1.17479  0.01171  1.11037
## sd.d                                   1.1828  1.47635  1.63012  1.80639
##                                          97.5%
## d.Glidescope.Airtraq                   0.98788
## d.Glidescope.APA_DAB                   6.14181
## d.Glidescope.C_MAC_D                  -0.03371
## d.Glidescope.C_MAC_MAC                 2.91518
## d.Glidescope.imago_v_blade             3.51857
## d.Glidescope.Kingvision_Channeled      2.30964
## d.Glidescope.Kingvision_Non_channeled  1.09270
## d.Glidescope.Macintosh                 3.80330
## d.Glidescope.McGrath_MAC               2.24318
## d.Glidescope.McGrath_Series_5          2.23096
## d.Glidescope.Pentax_AWS                1.21225
## d.Glidescope.Truview                   1.01560
## d.Macintosh.Airtraq_Non_channeled     -0.44284
## d.Macintosh.APA_MAC                    1.70294
## d.Macintosh.C_MAC_Miller               2.40992
## d.Macintosh.CEL_100                    1.92992
## d.Macintosh.Pentax_AWS_Miller          1.46389
## d.Macintosh.UESCOPE_MAC                2.81532
## sd.d                                   2.32690
## 
## -- Model fit (residual deviance):
## 
##     Dbar       pD      DIC 
## 203.5972 151.6099 355.2071 
## 
## 266 data points, ratio 0.7654, I^2 = 0%
mcmc.cormack.2 <- gemtc::mtc.run(model.cormack, n.adapt = 5000, n.iter = 1e5, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 266
##    Unobserved stochastic nodes: 285
##    Total graph size: 5582
## 
## Initializing model
summary(mcmc.cormack.2)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 5010:105000
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 10000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                           Mean     SD Naive SE Time-series SE
## d.Glidescope.Airtraq                  -0.19715 0.5722 0.002861       0.004475
## d.Glidescope.APA_DAB                   2.95034 1.4500 0.007250       0.007385
## d.Glidescope.C_MAC_D                  -2.23359 1.2218 0.006109       0.014789
## d.Glidescope.C_MAC_MAC                 1.69547 0.6432 0.003216       0.005622
## d.Glidescope.imago_v_blade             0.20384 1.8272 0.009136       0.009136
## d.Glidescope.Kingvision_Channeled      0.81994 0.7134 0.003567       0.004823
## d.Glidescope.Kingvision_Non_channeled -1.17769 1.1554 0.005777       0.007469
## d.Glidescope.Macintosh                 2.86908 0.4786 0.002393       0.005235
## d.Glidescope.McGrath_MAC               0.72647 0.7450 0.003725       0.005126
## d.Glidescope.McGrath_Series_5          0.46869 0.8633 0.004316       0.005799
## d.Glidescope.Pentax_AWS               -0.70370 0.9247 0.004624       0.005906
## d.Glidescope.Truview                  -0.99843 1.0773 0.005386       0.006900
## d.Macintosh.Airtraq_Non_channeled     -3.42812 1.6368 0.008184       0.010051
## d.Macintosh.APA_MAC                   -0.46179 1.1961 0.005981       0.006203
## d.Macintosh.C_MAC_Miller              -0.72094 1.6243 0.008122       0.008016
## d.Macintosh.CEL_100                   -2.16112 2.2110 0.011055       0.015704
## d.Macintosh.Pentax_AWS_Miller         -1.72235 1.5292 0.007646       0.008177
## d.Macintosh.UESCOPE_MAC               -0.07797 1.7669 0.008834       0.009037
## sd.d                                   1.70075 0.2711 0.001355       0.003926
## 
## 2. Quantiles for each variable:
## 
##                                          2.5%     25%      50%      75%
## d.Glidescope.Airtraq                  -1.3306 -0.5714 -0.19704  0.17940
## d.Glidescope.APA_DAB                   0.1088  1.9941  2.93895  3.89184
## d.Glidescope.C_MAC_D                  -4.8025 -3.0052 -2.17258 -1.39811
## d.Glidescope.C_MAC_MAC                 0.4885  1.2602  1.67623  2.11007
## d.Glidescope.imago_v_blade            -3.4335 -0.9902  0.21467  1.39767
## d.Glidescope.Kingvision_Channeled     -0.5527  0.3442  0.81004  1.28612
## d.Glidescope.Kingvision_Non_channeled -3.5578 -1.9181 -1.14178 -0.40492
## d.Glidescope.Macintosh                 1.9916  2.5426  2.84517  3.17209
## d.Glidescope.McGrath_MAC              -0.7313  0.2307  0.72467  1.21627
## d.Glidescope.McGrath_Series_5         -1.1959 -0.1079  0.44964  1.02916
## d.Glidescope.Pentax_AWS               -2.6150 -1.2981 -0.67297 -0.07564
## d.Glidescope.Truview                  -3.1836 -1.7016 -0.97916 -0.26265
## d.Macintosh.Airtraq_Non_channeled     -6.6653 -4.5101 -3.42104 -2.33650
## d.Macintosh.APA_MAC                   -2.8363 -1.2476 -0.45371  0.32015
## d.Macintosh.C_MAC_Miller              -3.9320 -1.7994 -0.72020  0.35479
## d.Macintosh.CEL_100                   -6.7666 -3.5439 -2.09281 -0.69227
## d.Macintosh.Pentax_AWS_Miller         -4.8438 -2.7146 -1.69414 -0.70501
## d.Macintosh.UESCOPE_MAC               -3.5680 -1.2239 -0.08957  1.07622
## sd.d                                   1.2370  1.5102  1.67783  1.86509
##                                          97.5%
## d.Glidescope.Airtraq                   0.93776
## d.Glidescope.APA_DAB                   5.84372
## d.Glidescope.C_MAC_D                  -0.01062
## d.Glidescope.C_MAC_MAC                 3.02004
## d.Glidescope.imago_v_blade             3.81609
## d.Glidescope.Kingvision_Channeled      2.25305
## d.Glidescope.Kingvision_Non_channeled  1.01254
## d.Glidescope.Macintosh                 3.87528
## d.Glidescope.McGrath_MAC               2.18863
## d.Glidescope.McGrath_Series_5          2.20850
## d.Glidescope.Pentax_AWS                1.04103
## d.Glidescope.Truview                   1.03557
## d.Macintosh.Airtraq_Non_channeled     -0.19167
## d.Macintosh.APA_MAC                    1.91500
## d.Macintosh.C_MAC_Miller               2.47913
## d.Macintosh.CEL_100                    2.01769
## d.Macintosh.Pentax_AWS_Miller          1.23344
## d.Macintosh.UESCOPE_MAC                3.40197
## sd.d                                   2.29491
## 
## -- Model fit (residual deviance):
## 
##     Dbar       pD      DIC 
## 201.9404 153.0157 354.9561 
## 
## 266 data points, ratio 0.7592, I^2 = 0%
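
Because these results are on the log odds ratio scale, they can be exponentiated for interpretation: for example, d.Glidescope.Macintosh = 2.87 corresponds to an odds ratio of approximately exp(2.87) = 17.6 for difficult laryngoscopy with the Macintosh relative to the Glidescope.
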
gelman.plot(mcmc.cormack.1)

gelman.plot(mcmc.cormack.2)

gelman.diag(mcmc.cormack.1)$mpsrf
## [1] 1.197373
gelman.diag(mcmc.cormack.2)$mpsrf
## [1] 1.001596
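#Consistency check for the Cormack network: node-splitting contrasts direct and indirect evidence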
nodesplit.cormack <- gemtc::mtc.nodesplit(graph.cormack, linearModel = "random", likelihood = "binom",link = "logit",n.adapt = 5000, n.iter = 1e5, thin = 10)
summary(nodesplit.cormack)
forest(relative.effect(mcmc.cormack.2, t1 = "Macintosh"), use.description = TRUE,xlim = c(-10, 10))

rank.probability.cormack <- gemtc::rank.probability(mcmc.cormack.2, preferredDirection = -1)
plot(rank.probability.cormack, beside=TRUE)

sucra.cormack <- dmetar::sucra(rank.probability.cormack, lower.is.better = FALSE)
sucra.cormack
##                              SUCRA
## C_MAC_D                  0.9312417
## Kingvision_Non_channeled 0.8246833
## Truview                  0.8039583
## Pentax_AWS               0.7667431
## Airtraq_Non_channeled    0.7044111
## Airtraq                  0.6837069
## Glidescope               0.6372319
## imago_v_blade            0.5726958
## McGrath_Series_5         0.5331028
## CEL_100                  0.4894667
## McGrath_MAC              0.4753958
## Kingvision_Channeled     0.4555056
## Pentax_AWS_Miller        0.4131931
## C_MAC_MAC                0.2880931
## C_MAC_Miller             0.2639778
## APA_MAC                  0.2017944
## UESCOPE_MAC              0.1904056
## APA_DAB                  0.1498944
## Macintosh                0.1144986
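
Because preferredDirection = -1 marks a lower incidence of difficult laryngoscopy as preferable, higher SUCRA values identify devices less likely to produce a difficult view; the Macintosh ranks last.
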
meta::metaprop(event = net.cormack$responders[net.cormack$treatment=="Macintosh"],n= net.cormack$sampleSize[net.cormack$treatment=="Macintosh"], studlab = net.cormack$study[net.cormack$treatment=="Macintosh"],data = net.cormack[net.cormack$treatment=="Macintosh",],method = "GLMM",sm = "PLOGIT",comb.fixed = FALSE,comb.random = TRUE,hakn = TRUE)
##                        proportion           95%-CI
## Abdallah 2011              0.2245 [0.1177; 0.3662]
## Abdelgalel 2018            0.3000 [0.1656; 0.4653]
## Abdelgawad 2015            0.0000 [0.0000; 0.0881]
## Aggarwal 2019              0.0000 [0.0000; 0.0711]
## Agrawal 2020               0.7500 [0.5880; 0.8731]
## Ahmad 2016                 0.5312 [0.4266; 0.6339]
## Akbar 2015                 0.0667 [0.0140; 0.1827]
## Al - Ghamdi 2016           0.1364 [0.0291; 0.3491]
## Ali 2017                   0.2667 [0.1228; 0.4589]
## Altun 2018                 0.0750 [0.0157; 0.2039]
## Andersen 2011              0.2800 [0.1623; 0.4249]
## Aoi 2010                   0.5294 [0.2781; 0.7702]
## Aqil 2016                  0.1000 [0.0279; 0.2366]
## Aqil 2017                  0.2000 [0.1139; 0.3127]
## Arici 2014                 0.0000 [0.0000; 0.0881]
## Arslan 2017                0.0000 [0.0000; 0.0881]
## Aziz 2012                  0.1905 [0.1305; 0.2634]
## Bakshi 2019                0.0000 [0.0000; 0.0974]
## Barak 2007                 0.0667 [0.0249; 0.1395]
## Bashir 2020                0.0500 [0.0061; 0.1692]
## Colak 2019                 0.2222 [0.1120; 0.3709]
## El - Tahan 2018            0.1250 [0.0351; 0.2899]
## Huang 2020                 0.2333 [0.0993; 0.4228]
## Wasem 2013                 0.0000 [0.0000; 0.1157]
## Yao 2015                   0.0000 [0.0000; 0.0740]
## Bhalla 2018                0.0000 [0.0000; 0.2180]
## Bhat 2015                  0.3000 [0.1786; 0.4461]
## Bilehjani 2009             0.0263 [0.0007; 0.1381]
## Blajic 2019                0.1186 [0.0491; 0.2293]
## Cakir 2020                 0.5806 [0.3908; 0.7545]
## Caparlar 2019              0.0000 [0.0000; 0.0903]
## Chandrashekaraiah 2017     0.1333 [0.0376; 0.3072]
## Colak 2015                 0.1020 [0.0340; 0.2223]
## Dey 2020                   0.3000 [0.2163; 0.3948]
## Erden 2010                 0.1875 [0.0405; 0.4565]
## Erturk 2015                0.2000 [0.0905; 0.3565]
## Gao 2018                   0.1951 [0.1158; 0.2974]
## Gupta 2020                 0.2500 [0.1269; 0.4120]
## Hamp 2015                  0.2000 [0.0573; 0.4366]
## Hosalli 2017               0.1000 [0.0211; 0.2653]
## Hu 2017                    0.0400 [0.0110; 0.0993]
## Ilyas 2014                 0.2283 [0.1586; 0.3112]
## Jungbauer 2009             0.3600 [0.2664; 0.4621]
## Kaur 2020                  0.2250 [0.1084; 0.3845]
## Khan 2008                  0.1364 [0.0291; 0.3491]
## Kido 2015                  0.2000 [0.0683; 0.4070]
## Kim 2013                   0.6522 [0.4273; 0.8362]
## Kleine-Brueggeney 2017     0.6833 [0.5922; 0.7652]
## Kunaz 2016                 0.0800 [0.0222; 0.1923]
## Laosuwan 2015              0.2727 [0.0602; 0.6097]
## Lascarrou 2017             0.1864 [0.1319; 0.2517]
## Lim 2005                   0.2667 [0.1228; 0.4589]
## Lin 2012                   0.0610 [0.0201; 0.1366]
## Liu 2016                   0.0787 [0.0322; 0.1554]
## Maharaj 2006               0.0333 [0.0008; 0.1722]
## Maharaj 2007               0.3500 [0.1539; 0.5922]
## Maharaj 2008               0.8500 [0.6211; 0.9679]
## Mahmood 2015               0.0667 [0.0082; 0.2207]
## Malik 2008                 0.1667 [0.0564; 0.3472]
## Malik_1 2009               0.3200 [0.1495; 0.5350]
## Malik_2 2009               0.0667 [0.0082; 0.2207]
## Maruyama 2008              0.0000 [0.0000; 0.2646]
## Mcelwain 2011              0.3226 [0.1668; 0.5137]
## Kulkarni 2013              0.0333 [0.0008; 0.1722]
## Inangil 2018               0.1143 [0.0320; 0.2674]
## Ing 2017                   0.0000 [0.0000; 0.2059]
## Jafra 2018                 0.0400 [0.0110; 0.0993]
## Ndoko 2008                 0.2075 [0.1084; 0.3411]
## Ninan 2016                 0.0333 [0.0008; 0.1722]
## Nishiyama 2011             0.1143 [0.0320; 0.2674]
## Parasa 2016                0.1333 [0.0376; 0.3072]
## Pazur 2016                 0.0769 [0.0095; 0.2513]
## Ranieri 2012               0.1094 [0.0451; 0.2125]
## Reena 2019                 0.1800 [0.0858; 0.3144]
## Risse 2020                 0.0323 [0.0008; 0.1670]
## Ruetzeler 2020             0.1587 [0.0788; 0.2726]
## Shah 2016                  0.0345 [0.0009; 0.1776]
## Sun 2005                   0.1800 [0.1103; 0.2695]
## Takenaka 2011              0.0588 [0.0072; 0.1968]
## Teoh 2010                  0.0500 [0.0164; 0.1128]
## Toker 2019                 0.2600 [0.1463; 0.4034]
## Tolon 2012                 0.1000 [0.0123; 0.3170]
## Tsan 2020                  0.0000 [0.0000; 0.0521]
## Varsha 2019                0.3143 [0.1685; 0.4929]
## Vijayakumar 2016           0.2667 [0.1460; 0.4194]
## Walker 2009                0.0000 [0.0000; 0.0596]
## Yoo 2018                   0.4545 [0.2439; 0.6779]
## Yumul 2016                 0.3548 [0.1923; 0.5463]
## Cavus 2011                 0.1400 [0.0582; 0.2674]
## El-Tahan 2017              0.0000 [0.0000; 0.2316]
## 
## Number of studies combined: k = 90
## 
##                      proportion           95%-CI
## Random effects model     0.1230 [0.0938; 0.1598]
## 
## Quantifying heterogeneity:
##  tau^2 = 1.7082; tau = 1.3070; I^2 = 90.8%; H = 3.30
## 
## Test of heterogeneity:
##       Q d.f.  p-value             Test
##  463.93   89 < 0.0001        Wald-type
##  819.01   89 < 0.0001 Likelihood-Ratio
## 
## Details on meta-analytical method:
## - Random intercept logistic regression model
## - Maximum-likelihood estimator for tau^2
## - Hartung-Knapp adjustment for random effects model
## - Logit transformation
## - Clopper-Pearson confidence interval for individual studies
## - Continuity correction of 0.5 in studies with zero cell frequencies
##   (only used to calculate individual study results)
#Funnel Plot for Cormack and Lehane's classification
funnel.cormack<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Tables Funnel Plots/Funnel Network Cormack.csv")
funnel.cormack_analysis<-dplyr::filter(funnel.cormack,funnel.cormack$fcormack.e1>0 | funnel.cormack$fcormack.e2>0)
mbin_funnel.cormack_random<-meta::metabin(fcormack.e1,fcormack.t1,fcormack.e2,fcormack.t2,data = funnel.cormack_analysis,studlab =study,comb.fixed = FALSE,comb.random = TRUE,method.tau = "SJ",hakn = TRUE,prediction = TRUE,incr = 0.5,sm = "RR")
mbin_funnel.cormack_random
##                            RR             95%-CI %W(random)
## Abdallah 2011          0.6236 [0.2634;   1.4765]        1.3
## Abdelgalel 2018        0.1667 [0.0398;   0.6974]        1.0
## Abdelgalel 2018        0.2500 [0.0763;   0.8192]        1.2
## Agrawal 2020           0.0164 [0.0010;   0.2591]        0.6
## Ahmad 2016             0.0483 [0.0121;   0.1920]        1.1
## Akbar 2015             0.6667 [0.1169;   3.8014]        0.9
## Akbas 2019             0.5556 [0.2041;   1.5125]        1.3
## Al - Ghamdi 2016       0.6984 [0.1293;   3.7711]        0.9
## Al - Ghamdi 2016       0.6984 [0.1293;   3.7711]        0.9
## Al - Ghamdi 2016       0.1429 [0.0078;   2.6093]        0.5
## Ali 2017               0.2500 [0.0578;   1.0814]        1.0
## Altun 2018             1.3333 [0.3186;   5.5792]        1.0
## Altun 2018             0.6667 [0.1176;   3.7781]        0.9
## Andersen 2011          0.1429 [0.0342;   0.5962]        1.0
## Aoi 2010               0.0526 [0.0033;   0.8362]        0.6
## Aqil 2016              0.2500 [0.0292;   2.1399]        0.7
## Aqil 2017              0.2143 [0.0644;   0.7130]        1.2
## Arici 2014             5.0000 [0.2477; 100.9273]        0.5
## Arslan 2015            3.0000 [0.1259;  71.4820]        0.5
## Aziz 2012              0.3523 [0.1776;   0.6991]        1.4
## Aziz 2016              1.3947 [0.6902;   2.8185]        1.4
## Barak 2007             0.0865 [0.0049;   1.5111]        0.5
## Bashir 2020            0.2000 [0.0099;   4.0371]        0.5
## Belze 2017             7.0000 [0.3747; 130.7601]        0.5
## Bhat 2015              0.2667 [0.0951;   0.7476]        1.2
## Bilehjani 2009         0.3169 [0.0133;   7.5444]        0.5
## Blajic 2019            0.2857 [0.0619;   1.3188]        1.0
## Blajic 2019            0.7024 [0.2362;   2.0889]        1.2
## Brozek 2020            1.0000 [0.2633;   3.7983]        1.1
## Cakir 2020             1.0000 [0.6550;   1.5266]        1.5
## Cavus 2018             1.6765 [0.2917;   9.6364]        0.9
## Cavus 2018             0.9804 [0.2077;   4.6285]        1.0
## Cavus 2011             0.9653 [0.3324;   2.8029]        1.2
## Cavus 2011             0.3968 [0.0524;   3.0059]        0.8
## Cavus 2011             0.7937 [0.2710;   2.3246]        1.2
## Chandrashekaraiah 2017 0.5000 [0.0989;   2.5270]        1.0
## Colak 2019             0.7000 [0.2924;   1.6759]        1.3
## Colak 2015             0.0928 [0.0053;   1.6329]        0.5
## Colak 2015             0.2130 [0.0259;   1.7555]        0.8
## Dey 2020               0.3086 [0.1602;   0.5947]        1.4
## El - Tahan 2018        1.1765 [0.3463;   3.9967]        1.1
## El - Tahan 2018        1.1429 [0.3360;   3.8877]        1.1
## El - Tahan 2018        1.7500 [0.5673;   5.3986]        1.2
## Erden 2010             0.3333 [0.0387;   2.8745]        0.7
## Erturk 2015            0.2500 [0.0566;   1.1051]        1.0
## Gao 2018               0.9491 [0.5035;   1.7891]        1.4
## Gupta 2020             0.0476 [0.0029;   0.7857]        0.5
## Hamp 2015              0.1302 [0.0075;   2.2530]        0.5
## Hosalli 2017           0.1429 [0.0077;   2.6497]        0.5
## Hu 2017                0.1111 [0.0061;   2.0369]        0.5
## Huang 2020             0.0689 [0.0041;   1.1536]        0.5
## Huang 2020             0.0667 [0.0040;   1.1166]        0.5
## Ilyas 2014             0.0169 [0.0010;   0.2744]        0.5
## Inangil 2018           0.1111 [0.0062;   1.9882]        0.5
## Jafra 2018             0.1111 [0.0061;   2.0369]        0.5
## Jungbauer 2009         0.2778 [0.1460;   0.5286]        1.4
## Kaur 2020              0.0526 [0.0032;   0.8744]        0.5
## Kaur 2020              0.0526 [0.0032;   0.8744]        0.5
## Khan 2008              0.1429 [0.0078;   2.6093]        0.5
## Kido 2015              0.0909 [0.0053;   1.5599]        0.5
## Kim 2013               0.0337 [0.0021;   0.5303]        0.6
## Kleine-Brueggeney 2016 0.1429 [0.0075;   2.7360]        0.5
## Kleine-Brueggeney 2016 1.6667 [0.4074;   6.8185]        1.1
## Kleine-Brueggeney 2016 0.3333 [0.0352;   3.1594]        0.7
## Kleine-Brueggeney 2016 9.0000 [2.8055;  28.8720]        1.2
## Kleine-Brueggeney 2016 1.6667 [0.4074;   6.8185]        1.1
## Kleine-Brueggeney 2017 0.0488 [0.0185;   0.1288]        1.3
## Kleine-Brueggeney 2017 0.7073 [0.5668;   0.8827]        1.5
## Kleine-Brueggeney 2017 0.0061 [0.0004;   0.0966]        0.6
## Kulkarni 2013          0.3333 [0.0141;   7.8648]        0.5
## Kunaz 2016             0.1111 [0.0061;   2.0105]        0.5
## Lange 2009             0.2000 [0.0100;   3.9955]        0.5
## Laosuwan 2015          0.1429 [0.0083;   2.4653]        0.5
## Lascarrou 2017         0.5486 [0.3213;   0.9367]        1.4
## Lee 2012               0.1111 [0.0063;   1.9591]        0.5
## Lee 2012               0.1111 [0.0063;   1.9591]        0.5
## Lee 2012               0.1111 [0.0063;   1.9591]        0.5
## Lim 2005               0.0588 [0.0035;   0.9748]        0.5
## Lin 2012               0.1976 [0.0236;   1.6548]        0.8
## Liu 2016               0.0674 [0.0039;   1.1627]        0.5
## Liu 2019               0.0076 [0.0005;   0.1219]        0.6
## Maharaj 2006           0.3333 [0.0141;   7.8648]        0.5
## Maharaj 2007           0.0667 [0.0041;   1.0925]        0.5
## Maharaj 2008           0.0286 [0.0018;   0.4441]        0.6
## Mahmood 2015           0.2000 [0.0100;   3.9955]        0.5
## Malik 2008             0.4000 [0.0841;   1.9031]        1.0
## Malik 2008             0.0909 [0.0053;   1.5735]        0.5
## Malik 2008             0.0909 [0.0053;   1.5735]        0.5
## Malik1 2009            0.0588 [0.0036;   0.9662]        0.5
## Malik1 2009            0.0588 [0.0036;   0.9662]        0.5
## Malik2 2009            0.2000 [0.0100;   3.9955]        0.5
## Markham 2019           7.0000 [0.3681; 133.1081]        0.5
## Mcelwain 2011          0.4276 [0.1507;   1.2135]        1.2
## Mcelwain 2011          0.0508 [0.0031;   0.8297]        0.5
## Ndoko 2008             0.0435 [0.0026;   0.7193]        0.5
## Ninan 2016             0.3333 [0.0141;   7.8648]        0.5
## Nishiyama 2011         1.4583 [0.4497;   4.7294]        1.2
## Nishiyama 2011         1.5441 [0.4774;   4.9942]        1.2
## Nishiyama 2011         1.9444 [0.6431;   5.8788]        1.2
## Parasa 2016            0.2500 [0.0296;   2.1081]        0.7
## Pazur 2016             0.2000 [0.0101;   3.9701]        0.5
## Ranieri 2012           0.0628 [0.0037;   1.0770]        0.5
## Reena 2019             0.2222 [0.0505;   0.9774]        1.0
## Risse 2020             0.3043 [0.0129;   7.2021]        0.5
## Ruetzeler 2020         0.2864 [0.0826;   0.9927]        1.1
## Shah 2016              0.3224 [0.0137;   7.6018]        0.5
## Shim 2016              0.3224 [0.0137;   7.6018]        0.5
## Sun 2005               0.8333 [0.4453;   1.5594]        1.4
## Takenaka 2011          0.1944 [0.0097;   3.9039]        0.5
## Teoh 2010              0.0909 [0.0051;   1.6224]        0.5
## Teoh 2010              0.4000 [0.0795;   2.0136]        1.0
## Teoh 2010              0.2000 [0.0238;   1.6814]        0.7
## Toker 2019             0.1538 [0.0366;   0.6469]        1.0
## Tolon 2012             0.2000 [0.0102;   3.9140]        0.5
## Vargas 2017            1.1111 [0.5702;   2.1652]        1.4
## Varsha 2019            0.0435 [0.0027;   0.7100]        0.5
## Vijayakumar 2016       0.0400 [0.0024;   0.6556]        0.5
## Walker 2009            3.0000 [0.1247;  72.1913]        0.5
## Wan 2016               1.1938 [0.4365;   3.2652]        1.2
## Yoo 2018               0.1000 [0.0140;   0.7162]        0.8
## Yumul 2016             0.1879 [0.0454;   0.7777]        1.0
## Yumul 2016             0.1879 [0.0454;   0.7777]        1.0
## Yumul 2016             0.0449 [0.0028;   0.7291]        0.5
## 
## Number of studies combined: k = 123
## 
##                          RR           95%-CI     t  p-value
## Random effects model 0.3242 [0.2570; 0.4092] -9.59 < 0.0001
## Prediction interval         [0.0397; 2.6500]               
## 
## Quantifying heterogeneity:
##  tau^2 = 1.1122 [0.3966; 1.2178]; tau = 1.0546 [0.6298; 1.1035];
##  I^2 = 56.3% [46.6%; 64.3%]; H = 1.51 [1.37; 1.67]
## 
## Test of heterogeneity:
##       Q d.f.  p-value
##  279.47  122 < 0.0001
## 
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Continuity correction of 0.5 in studies with zero cell frequencies
meta::funnel(mbin_funnel.cormack_random,ref.triangle=TRUE,contour.levels=c(0.9,0.95,0.99),col.contour=c("darkblue","blue","lightblue"))

dmetar::eggers.test(mbin_funnel.cormack_random)
##              Intercept ConfidenceInterval      t p
## Egger's test    -1.209      -1.601--0.817 -6.408 0
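
In contrast to POGO, the intercept of -1.209 differs significantly from zero (reported p-value of 0), indicating funnel plot asymmetry and possible small-study effects for this outcome.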

Network meta-analysis of time to intubation

net.time<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Analysis Network Time.csv")
sum(net.time$sampleSize)
## [1] 12280
table(net.time$treatment)
## 
##                  Airtraq                  C_MAC_D                C_MAC_MAC 
##                       30                        8                       18 
##               Glidescope     Kingvision_Channeled Kingvision_Non_channeled 
##                       40                       13                        4 
##                Macintosh              McGrath_MAC         McGrath_Series_5 
##                      105                       11                       14 
##               Pentax_AWS        Pentax_AWS_Miller                  Tosight 
##                       12                        1                        1 
##                  Truview              UESCOPE_MAC 
##                       15                        2
id<-c("Airtraq","C_MAC_D","C_MAC_MAC","Glidescope","Kingvision_Channeled","Kingvision_Non_channeled","Macintosh","McGrath_MAC","McGrath_Series_5","Pentax_AWS","Pentax_AWS_Miller","Tosight","Truview","UESCOPE_MAC")
description<-c("Airtraq","C-MAC D","C-MAC","Glidescope","King Vision Channeled","King Vision Non-channeled","Macintosh","McGrath MAC","McGrath Series 5","Pentax AWS","Pentax AWS Miller","Tosight","Truview","UESCOPE MAC")
treat.codes.time<-data.frame(id,description)
graph.time<-gemtc::mtc.network(data.ab = net.time,treatments = treat.codes.time)
summary(graph.time)
## $Description
## [1] "MTC dataset: Network"
## 
## $`Studies per treatment`
##                  Airtraq                  C_MAC_D                C_MAC_MAC 
##                       30                        8                       18 
##               Glidescope     Kingvision_Channeled Kingvision_Non_channeled 
##                       40                       13                        4 
##                Macintosh              McGrath_MAC         McGrath_Series_5 
##                      105                       11                       14 
##               Pentax_AWS        Pentax_AWS_Miller                  Tosight 
##                       12                        1                        1 
##                  Truview              UESCOPE_MAC 
##                       15                        2 
## 
## $`Number of n-arm studies`
## 2-arm 3-arm 4-arm 
##   106    14     5 
## 
## $`Studies per treatment comparison`
##                          t1                       t2 nr
## 1                   Airtraq                C_MAC_MAC  1
## 2                   Airtraq               Glidescope  3
## 3                   Airtraq     Kingvision_Channeled  2
## 4                   Airtraq                Macintosh 24
## 5                   Airtraq         McGrath_Series_5  2
## 6                   Airtraq               Pentax_AWS  3
## 7                   Airtraq        Pentax_AWS_Miller  1
## 8                   Airtraq                  Truview  1
## 9                   C_MAC_D                C_MAC_MAC  1
## 10                  C_MAC_D               Glidescope  2
## 11                  C_MAC_D     Kingvision_Channeled  1
## 12                  C_MAC_D Kingvision_Non_channeled  1
## 13                  C_MAC_D                Macintosh  5
## 14                  C_MAC_D                  Truview  1
## 15                C_MAC_MAC               Glidescope  4
## 16                C_MAC_MAC     Kingvision_Channeled  1
## 17                C_MAC_MAC Kingvision_Non_channeled  1
## 18                C_MAC_MAC                Macintosh 13
## 19                C_MAC_MAC              McGrath_MAC  2
## 20                C_MAC_MAC         McGrath_Series_5  3
## 21                C_MAC_MAC               Pentax_AWS  1
## 22               Glidescope     Kingvision_Channeled  2
## 23               Glidescope Kingvision_Non_channeled  2
## 24               Glidescope                Macintosh 30
## 25               Glidescope              McGrath_MAC  2
## 26               Glidescope         McGrath_Series_5  4
## 27               Glidescope               Pentax_AWS  3
## 28               Glidescope                  Truview  1
## 29     Kingvision_Channeled Kingvision_Non_channeled  1
## 30     Kingvision_Channeled                Macintosh  9
## 31     Kingvision_Channeled                  Truview  1
## 32 Kingvision_Non_channeled                Macintosh  1
## 33                Macintosh              McGrath_MAC  7
## 34                Macintosh         McGrath_Series_5  9
## 35                Macintosh               Pentax_AWS 10
## 36                Macintosh        Pentax_AWS_Miller  1
## 37                Macintosh                  Tosight  1
## 38                Macintosh                  Truview 13
## 39                Macintosh              UESCOPE_MAC  2
## 40              McGrath_MAC                  Truview  1
## 41         McGrath_Series_5                  Truview  3
## 42               Pentax_AWS        Pentax_AWS_Miller  1
## 43               Pentax_AWS                  Truview  1
plot(graph.time,use.description = TRUE,vertex.shape="circle",vertex.color="white",vertex.size=15,vertex.label.color="dark blue",vertex.label.cex=1.0,edge.color="light blue")

model.time <- gemtc::mtc.model(graph.time,likelihood = "normal",link = "identity",linearModel = "random",n.chain = 4)
mcmc.time.1 <- gemtc::mtc.run(model.time, n.adapt = 50, n.iter = 1000, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 274
##    Unobserved stochastic nodes: 288
##    Total graph size: 3613
## 
## Initializing model
## Warning in rjags::jags.model(file.model, data = syntax[["data"]], inits =
## syntax[["inits"]], : Adaptation incomplete
## NOTE: Stopping adaptation
summary(mcmc.time.1)
## 
## Results on the Mean Difference scale
## 
## Iterations = 60:1050
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 100 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                         Mean      SD Naive SE Time-series SE
## d.Macintosh.Airtraq                  -5.0875  2.2585   0.1129        0.10306
## d.Macintosh.C_MAC_D                  -2.5373  4.2862   0.2143        0.18263
## d.Macintosh.C_MAC_MAC                -3.8272  2.7758   0.1388        0.12888
## d.Macintosh.Glidescope                2.5782  2.0472   0.1024        0.10190
## d.Macintosh.Kingvision_Channeled     -1.1014  3.4645   0.1732        0.16069
## d.Macintosh.Kingvision_Non_channeled -0.2474  5.8469   0.2923        0.25024
## d.Macintosh.McGrath_MAC               3.4461  3.8121   0.1906        0.18112
## d.Macintosh.McGrath_Series_5         14.2376  3.2457   0.1623        0.14854
## d.Macintosh.Pentax_AWS               -1.6595  3.1126   0.1556        0.14832
## d.Macintosh.Pentax_AWS_Miller        13.0742 10.3374   0.5169        0.49948
## d.Macintosh.Tosight                  -4.2639 13.8111   0.6906        0.69300
## d.Macintosh.Truview                   9.3910  3.0211   0.1511        0.16791
## d.Macintosh.UESCOPE_MAC              -7.4378  8.8846   0.4442        0.38546
## sd.d                                 12.1044  0.8441   0.0422        0.03364
## 
## 2. Quantiles for each variable:
## 
##                                         2.5%      25%       50%     75%   97.5%
## d.Macintosh.Airtraq                   -9.292  -6.7034 -5.216402 -3.6142 -0.5841
## d.Macintosh.C_MAC_D                  -10.617  -5.2819 -2.502385  0.4000  6.2895
## d.Macintosh.C_MAC_MAC                 -8.909  -5.7496 -3.969426 -2.0026  1.7177
## d.Macintosh.Glidescope                -1.272   1.2322  2.559008  3.7922  6.5310
## d.Macintosh.Kingvision_Channeled      -7.716  -3.4079 -1.333187  1.1927  6.0861
## d.Macintosh.Kingvision_Non_channeled -11.906  -4.3782  0.002836  4.1244  9.6013
## d.Macintosh.McGrath_MAC               -3.617   0.7106  3.569901  6.2421 10.6522
## d.Macintosh.McGrath_Series_5           7.445  12.3010 14.438067 16.1903 20.4490
## d.Macintosh.Pentax_AWS                -7.648  -3.7886 -1.776570  0.5388  4.2789
## d.Macintosh.Pentax_AWS_Miller         -6.919   6.9650 13.277425 19.4357 33.4970
## d.Macintosh.Tosight                  -30.843 -13.5315 -4.562038  4.1712 23.8589
## d.Macintosh.Truview                    3.794   7.3731  9.267216 11.5920 15.0898
## d.Macintosh.UESCOPE_MAC              -24.545 -13.4799 -7.390855 -1.6537 10.1234
## sd.d                                  10.619  11.5243 12.029766 12.6539 13.8473
## 
## -- Model fit (residual deviance):
## 
##     Dbar       pD      DIC 
## 280.6836 261.8981 542.5817 
## 
## 274 data points, ratio 1.024, I^2 = 3%
mcmc.time.2 <- gemtc::mtc.run(model.time, n.adapt = 5000, n.iter = 1e5, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 274
##    Unobserved stochastic nodes: 288
##    Total graph size: 3613
## 
## Initializing model
summary(mcmc.time.2)
## 
## Results on the Mean Difference scale
## 
## Iterations = 5010:105000
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 10000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                        Mean      SD Naive SE Time-series SE
## d.Macintosh.Airtraq                  -5.048  2.3214 0.011607       0.011652
## d.Macintosh.C_MAC_D                  -2.605  4.3297 0.021649       0.021732
## d.Macintosh.C_MAC_MAC                -3.903  2.8704 0.014352       0.014414
## d.Macintosh.Glidescope                2.570  2.0250 0.010125       0.010125
## d.Macintosh.Kingvision_Channeled     -1.176  3.4193 0.017096       0.017005
## d.Macintosh.Kingvision_Non_channeled -0.210  5.9261 0.029630       0.028679
## d.Macintosh.McGrath_MAC               2.982  3.9040 0.019520       0.019521
## d.Macintosh.McGrath_Series_5         14.423  3.3242 0.016621       0.016876
## d.Macintosh.Pentax_AWS               -1.860  3.5480 0.017740       0.018253
## d.Macintosh.Pentax_AWS_Miller        12.529 10.1242 0.050621       0.051214
## d.Macintosh.Tosight                  -3.241 13.7203 0.068601       0.068600
## d.Macintosh.Truview                   9.421  3.0838 0.015419       0.015419
## d.Macintosh.UESCOPE_MAC              -7.795  8.7284 0.043642       0.043472
## sd.d                                 12.215  0.8437 0.004219       0.004405
## 
## 2. Quantiles for each variable:
## 
##                                         2.5%      25%     50%     75%   97.5%
## d.Macintosh.Airtraq                   -9.612  -6.6070 -5.0465 -3.4666 -0.5171
## d.Macintosh.C_MAC_D                  -11.080  -5.5299 -2.6124  0.3017  5.9206
## d.Macintosh.C_MAC_MAC                 -9.526  -5.8261 -3.9163 -1.9749  1.7356
## d.Macintosh.Glidescope                -1.419   1.2215  2.5796  3.9221  6.5049
## d.Macintosh.Kingvision_Channeled      -7.896  -3.4686 -1.1721  1.1066  5.5624
## d.Macintosh.Kingvision_Non_channeled -11.866  -4.1543 -0.2085  3.7399 11.4498
## d.Macintosh.McGrath_MAC               -4.689   0.3523  2.9863  5.5789 10.7108
## d.Macintosh.McGrath_Series_5           7.912  12.2037 14.4342 16.6546 20.9342
## d.Macintosh.Pentax_AWS                -8.854  -4.2192 -1.8674  0.5357  5.1697
## d.Macintosh.Pentax_AWS_Miller         -7.219   5.6991 12.4833 19.3586 32.4499
## d.Macintosh.Tosight                  -30.109 -12.4527 -3.2594  6.0007 23.7540
## d.Macintosh.Truview                    3.352   7.3589  9.4230 11.4851 15.5375
## d.Macintosh.UESCOPE_MAC              -25.041 -13.5983 -7.7754 -1.9433  9.3077
## sd.d                                  10.682  11.6254 12.1719 12.7630 13.9884
## 
## -- Model fit (residual deviance):
## 
##     Dbar       pD      DIC 
## 280.0633 262.6637 542.7270 
## 
## 274 data points, ratio 1.022, I^2 = 3%
gelman.plot(mcmc.time.1)

gelman.plot(mcmc.time.2)

gelman.diag(mcmc.time.1)$mpsrf
## [1] 1.117755
gelman.diag(mcmc.time.2)$mpsrf
## [1] 1.000599
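#Heterogeneity (anohe) and consistency (node-splitting) assessment for the time network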
anohe.time<-gemtc::mtc.anohe(graph.time,n.adapt = 5000, n.iter = 1e5, thin = 10,sampler="rjags")
summary(anohe.time)
nodesplit.time <- gemtc::mtc.nodesplit(graph.time, linearModel = "random", likelihood = "normal",link = "identity",n.adapt = 5000, n.iter = 1e5, thin = 10)
summary(nodesplit.time)
forest(relative.effect(mcmc.time.2, t1 = "Macintosh"), use.description = TRUE,xlim = c(-60, 60))

rank.probability.time <- gemtc::rank.probability(mcmc.time.2, preferredDirection = -1)
plot(rank.probability.time, beside=TRUE)

sucra.time <- dmetar::sucra(rank.probability.time, lower.is.better = FALSE)
sucra.time
##                              SUCRA
## Airtraq                  0.8120712
## UESCOPE_MAC              0.7942558
## C_MAC_MAC                0.7487115
## C_MAC_D                  0.6631904
## Pentax_AWS               0.6278288
## Tosight                  0.6130173
## Kingvision_Channeled     0.5885962
## Kingvision_Non_channeled 0.5394346
## Macintosh                0.5141135
## McGrath_MAC              0.3715288
## Glidescope               0.3596692
## Pentax_AWS_Miller        0.1660942
## Truview                  0.1484135
## McGrath_Series_5         0.0530750
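
With preferredDirection = -1 (shorter intubation times preferred), the Airtraq attains the highest SUCRA and the McGrath Series 5 the lowest, while the Macintosh ranks mid-table.
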
meta::metamean(n = net.time$sampleSize[net.time$treatment=="Macintosh"],mean= net.time$mean[net.time$treatment=="Macintosh"], sd=net.time$std.dev[net.time$treatment=="Macintosh"],studlab = net.time$study[net.time$treatment=="Macintosh"],data = net.time[net.time$treatment=="Macintosh",],method.tau = "SJ",sm = "MRAW",comb.fixed = FALSE,comb.random = TRUE,hakn = TRUE)
##                                   mean              95%-CI %W(random)
## Abdallah 2019                  14.1800 [13.0470;  15.3130]        1.0
## Abdelgalel 2018                28.8000 [25.6174;  31.9826]        1.0
## Abdelgawad - Hypertensive 2015 27.8000 [25.9593;  29.6407]        1.0
## Abdelgawad - Normotensive 2015 26.1000 [24.6976;  27.5024]        1.0
## Aggarwal 2019                  15.5300 [15.1059;  15.9541]        1.0
## Ahmad 2015                     16.1200 [13.8974;  18.3426]        1.0
## Ahmad 2016                     15.4200 [15.0859;  15.7541]        1.0
## Akbar 2015                     38.8000 [36.1996;  41.4004]        1.0
## Akbarzadeh 2017                11.1800 [ 1.6204;  20.7396]        0.9
## Al - Ghamdi 2016               35.1000 [31.5022;  38.6978]        1.0
## Ali 2012                       48.0000 [40.9441;  55.0559]        0.9
## Ali 2017                       32.5000 [28.7069;  36.2931]        1.0
## Altun 2018                     24.0000 [21.4588;  26.5412]        1.0
## Amini 2015                      9.3000 [ 8.8362;   9.7638]        1.0
## Ander 2017                     26.7000 [22.0865;  31.3135]        1.0
## Aoi 2010                       55.6000 [43.5888;  67.6112]        0.9
## Aqil 2016                      41.3300 [36.5917;  46.0683]        1.0
## Aqil 2017                      47.8000 [41.2173;  54.3827]        0.9
## Arici 2014                     32.2000 [30.1609;  34.2391]        1.0
## Arora 2013                     10.9000 [10.3399;  11.4601]        1.0
## Avula 2019                     29.9700 [28.0484;  31.8916]        1.0
## Aziz 2012                      33.0000 [29.5009;  36.4991]        1.0
## Bakshi 2015                    26.0000 [24.7062;  27.2938]        1.0
## Bakshi_2 2015                 103.0000 [97.7461; 108.2539]        1.0
## Bakshi_3 2015                  30.0000 [28.4495;  31.5505]        1.0
## Bakshi 2019                    56.6000 [52.0268;  61.1732]        1.0
## Barak 2007                     24.0000 [21.3142;  26.6858]        1.0
## Barman 2017                    23.5700 [21.8009;  25.3391]        1.0
## Bashir 2020                     9.7000 [ 9.2661;  10.1339]        1.0
## Bhalla 2018                    31.8100 [27.4731;  36.1469]        1.0
## Bhandari 2013                  29.2300 [27.6681;  30.7919]        1.0
## Bharti 2014                    29.6000 [24.4740;  34.7260]        1.0
## Bhat 2015                      33.8000 [31.2721;  36.3279]        1.0
## Bilehjani 2009                 14.5000 [11.8610;  17.1390]        1.0
## Blajic 2019                    29.0000 [25.4277;  32.5723]        1.0
## Cakir 2020                     45.9000 [39.1764;  52.6236]        0.9
## Caparlar 2019                  41.4900 [38.2574;  44.7226]        1.0
## Carlino 2009                   33.6000 [24.7439;  42.4561]        0.9
## Chalkeidis 2010                23.7000 [21.4322;  25.9678]        1.0
## Choi 2011                      18.6000 [16.7750;  20.4250]        1.0
## Colak 2015                     13.5900 [12.0528;  15.1272]        1.0
## Colak 2019                     28.6000 [26.7885;  30.4115]        1.0
## Dashti 2014                     8.2000 [ 7.7742;   8.6258]        1.0
## Dhonneur 2008                  69.0000 [65.7637;  72.2363]        1.0
## Di Marco 2011                  59.0000 [52.0653;  65.9347]        0.9
## El-Tahan 2017                  11.7000 [ 8.8661;  14.5339]        1.0
## Enomoto 2008                   50.5000 [45.3109;  55.6891]        1.0
## Erden 2010                     31.5000 [27.6781;  35.3219]        1.0
## Goksu 2016                     42.4000 [30.8578;  53.9422]        0.9
## Gunes 2020                     22.1000 [20.5092;  23.6908]        1.0
## Gupta 2020                     25.1200 [19.8300;  30.4100]        1.0
## Hamp 2015                      75.0000 [59.6609;  90.3391]        0.8
## Hirabayashi 2009               67.0000 [58.5722;  75.4278]        0.9
## Hirabayashi 2010               72.0000 [62.7882;  81.2118]        0.9
## Hsu 2012                       62.5000 [51.8722;  73.1278]        0.9
## Hu 2017                        26.0000 [24.3997;  27.6003]        1.0
## Ilyas 2014                     50.0000 [42.0131;  57.9869]        0.9
## Inangil 2018                   14.1000 [12.7748;  15.4252]        1.0
## Jafra 2018                     20.6800 [19.9672;  21.3928]        1.0
## Jungbauer 2009                 60.0000 [44.9083;  75.0917]        0.8
## Khan 2008                      19.6000 [14.6692;  24.5308]        1.0
## Kido 2015                      20.8000 [18.4872;  23.1128]        1.0
## Kim 2013                       29.9000 [18.2526;  41.5474]        0.9
## Koh 2010                       90.0000 [70.6356; 109.3644]        0.8
## Kucukosman 2020                14.3300 [12.8414;  15.8186]        1.0
## Kumar_2 2019                   39.1700 [32.5464;  45.7936]        0.9
## Kunaz 2016                     16.8600 [15.7042;  18.0158]        1.0
## Laosuwan 2015                  23.2100 [18.8251;  27.5949]        1.0
## Lee 2013                       12.8000 [12.1908;  13.4092]        1.0
## Lim 2005                       56.2000 [46.6815;  65.7185]        0.9
## Liu 2014                       68.8000 [60.5567;  77.0433]        0.9
## Liu 2016                       28.7000 [26.1446;  31.2554]        1.0
## Liu 2019                       49.9000 [40.5763;  59.2237]        0.9
## Maharaj 2006                   12.4000 [ 9.1079;  15.6921]        1.0
## Maharaj 2007                   20.3000 [14.9532;  25.6468]        1.0
## Maharaj 2008                   47.7000 [43.9748;  51.4252]        1.0
## Mahmood 2015                   13.0700 [12.0072;  14.1328]        1.0
## Malik 2008                     11.6000 [ 9.4530;  13.7470]        1.0
## Maruyama 2008                  16.8000 [10.7460;  22.8540]        0.9
## Maruyama_2 2008                16.0000 [12.4543;  19.5457]        1.0
## Myunghun-Kim 2017              17.9000 [14.2624;  21.5376]        1.0
## Ndoko 2008                     56.0000 [49.8079;  62.1921]        0.9
## Nishiyama 2011                 36.4800 [32.0075;  40.9525]        1.0
## Pappu 2020                     29.7300 [25.5612;  33.8988]        1.0
## Parasa 2016                    27.7730 [25.9401;  29.6059]        1.0
## Pazur 2016                     34.3000 [28.4959;  40.1041]        1.0
## Peirovifar 2014                37.5500 [36.0249;  39.0751]        1.0
## Pournajafian 2014               7.8000 [ 6.7640;   8.8360]        1.0
## Ranieri 2012                   37.0000 [31.3651;  42.6349]        1.0
## Reena 2019                     40.3000 [36.3086;  44.2914]        1.0
## Serocki 2013                   11.2000 [ 9.2597;  13.1403]        1.0
## Shah 2016                      37.4100 [30.5676;  44.2524]        0.9
## Shukla 2017                    45.2900 [37.0529;  53.5271]        0.9
## Sulser 2016                    31.0000 [28.9354;  33.0646]        1.0
## Sun 2005                       30.0000 [27.4991;  32.5009]        1.0
## Taylor 2013                    21.7000 [18.9225;  24.4775]        1.0
## Teoh 2010                      22.4000 [19.7344;  25.0656]        1.0
## Toker 2019                     40.1000 [38.6032;  41.5968]        1.0
## Tolon 2012                     48.7500 [39.2967;  58.2033]        0.9
## Tsan 2020                      36.2300 [32.8299;  39.6301]        1.0
## Turkstra 2005                  17.0000 [11.7734;  22.2266]        1.0
## Wasem 2013                     17.5000 [13.9216;  21.0784]        1.0
## Yao 2015                       24.3000 [22.2914;  26.3086]        1.0
## Yumul 2016                     70.0000 [54.8631;  85.1369]        0.8
## Zhao 2014                      96.0000 [91.0210; 100.9790]        1.0
## 
## Number of studies combined: k = 105
## 
##                         mean             95%-CI
## Random effects model 33.0558 [29.3220; 36.7896]
## 
## Quantifying heterogeneity:
##  tau^2 = 364.7277 [278.5419; 493.4338]; tau = 19.0978 [16.6896; 22.2134];
##  I^2 = 99.3% [99.3%; 99.4%]; H = 12.39 [12.03; 12.77]
## 
## Test of heterogeneity:
##         Q d.f. p-value
##  15969.55  104       0
## 
## Details on meta-analytical method:
## - Inverse variance method
## - Sidik-Jonkman estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Untransformed (raw) means
results.time<-gemtc::relative.effect.table(mcmc.time.2)
write.csv2(results.time,file = "results_time.csv")
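
Beyond the full league table, a single contrast can be summarised directly from the same posterior with relative.effect(). A minimal sketch, assuming the treatment ids of the time network match those used in the failed-intubation networks below (e.g. "Macintosh", "Glidescope"):

#Posterior summary of one contrast from the time network (sketch)
summary(gemtc::relative.effect(mcmc.time.2, t1 = "Macintosh", t2 = "Glidescope"))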

#Funnel Plot for time to intubation
funnel.time<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Tables Funnel Plots/Funnel Network Time.csv")
mcont_funnel.time_random<-meta::metacont(ftime.1,mtime.1,sdtime.1,ftime.2,mtime.2,sdtime.2,data=funnel.time,studlab = study,comb.fixed = FALSE,comb.random = TRUE,prediction = TRUE,sm="SMD")
mcont_funnel.time_random
##                                   SMD             95%-CI %W(random)
## Abdallah 2019                 -0.6764 [-1.1589; -0.1939]        0.7
## Abdelgalel 2018                0.2331 [-0.2068;  0.6729]        0.7
## Abdelgalel 2018                0.3065 [-0.1344;  0.7475]        0.7
## Abdelgawad - Hipertensos 2015 -2.1291 [-2.9201; -1.3382]        0.6
## Abdelgawad - Normotensos 2015 -2.4257 [-3.2609; -1.5905]        0.6
## Aggarwal 2019                  5.0579 [ 4.2423;  5.8735]        0.6
## Ahmad 2016                     4.0959 [ 3.5679;  4.6238]        0.7
## Ahmad 2015                     0.5655 [-0.0008;  1.1317]        0.7
## Ahmed 2017                    -0.8578 [-1.3882; -0.3274]        0.7
## Ajimi 2018                    -0.7893 [-1.3160; -0.2626]        0.7
## Akbar 2015                    -0.7636 [-1.1923; -0.3350]        0.7
## Akbarzadeh 2017               -0.0143 [-0.4897;  0.4610]        0.7
## Akbas 2019                     1.2414 [ 0.7608;  1.7220]        0.7
## Al - Ghamdi 2016               0.7604 [ 0.1391;  1.3816]        0.7
## Al - Ghamdi 2016               2.8028 [ 1.9403;  3.6652]        0.6
## Al - Ghamdi 2016               1.3875 [ 0.7230;  2.0519]        0.7
## Ali 2012                      -0.6936 [-1.2658; -0.1214]        0.7
## Ali 2015                       0.7918 [ 0.2144;  1.3693]        0.7
## Ali 2017                      -0.5621 [-1.0788; -0.0455]        0.7
## Altun 2018                    -0.4003 [-0.8431;  0.0426]        0.7
## Altun 2018                    -1.4030 [-1.8947; -0.9113]        0.7
## Amini 2015                     0.8256 [ 0.3364;  1.3148]        0.7
## Ander 2017                    -0.1410 [-0.5854;  0.3034]        0.7
## Aoi 2010                       0.2745 [-0.3822;  0.9313]        0.7
## Aqil 2016                     -0.6713 [-1.1224; -0.2203]        0.7
## Aqil 2017                     -0.5322 [-0.8695; -0.1950]        0.7
## Arici 2014                     1.2926 [ 0.8086;  1.7767]        0.7
## Arora 2013                     0.3881 [ 0.0072;  0.7690]        0.7
## Avula 2019                     1.9217 [ 1.3031;  2.5402]        0.7
## Aziz 2012                      0.4704 [ 0.2245;  0.7163]        0.7
## Bakshi 2019                    0.3914 [-0.0720;  0.8549]        0.7
## Bakshi - Exp 2015              5.6946 [ 3.9233;  7.4659]        0.4
## Bakshi - Exp 2015              5.5681 [ 3.8292;  7.3070]        0.4
## Bakshi - Nti 2015              1.1008 [ 0.2973;  1.9042]        0.6
## Bakshi - Nti 2015              1.1008 [ 0.2973;  1.9042]        0.6
## Bakshi - Nvl 2015              9.2198 [ 6.5115; 11.9281]        0.2
## Bakshi - Nvl 2015              7.9523 [ 5.5865; 10.3182]        0.3
## Barak 2007                     0.7145 [ 0.4037;  1.0253]        0.7
## Barman 2017                   -0.1143 [-0.5832;  0.3546]        0.7
## Bashir 2020                   -0.4528 [-0.8969; -0.0086]        0.7
## Bhalla 2018                    1.4217 [ 0.6086;  2.2348]        0.6
## Bhandari 2013                 -2.7051 [-3.3190; -2.0912]        0.7
## Bharti 2014                    0.6737 [ 0.0263;  1.3210]        0.7
## Bhat 2015                     -1.0098 [-1.4270; -0.5926]        0.7
## Bhola 2014                    -0.8912 [-1.3030; -0.4795]        0.7
## Bilehjani 2009                 0.9775 [ 0.5064;  1.4486]        0.7
## Blajic 2019                    0.0000 [-0.3609;  0.3609]        0.7
## Blajic 2019                   -0.3600 [-0.7224;  0.0023]        0.7
## Cakir 2020                     0.6310 [ 0.1201;  1.1419]        0.7
## Caparlar 2019                 -1.5320 [-2.0398; -1.0242]        0.7
## Carlino 2009                   0.4265 [-0.2985;  1.1514]        0.6
## Cattano 2012                  -0.1704 [-0.6774;  0.3367]        0.7
## Chalkeidis 2010                0.7792 [ 0.2450;  1.3134]        0.7
## Chanchayanon 2018              0.8278 [ 0.1793;  1.4764]        0.7
## Choi 2011                     -0.0782 [-0.5844;  0.4281]        0.7
## Colak 2019                     1.0554 [ 0.6131;  1.4977]        0.7
## Colak 2015                     0.8710 [ 0.4538;  1.2883]        0.7
## Colak 2015                     1.5474 [ 1.0865;  2.0083]        0.7
## Dashti 2014                    1.2810 [ 0.7173;  1.8447]        0.7
## Dhonneur 2008                 -2.7088 [-3.0832; -2.3343]        0.7
## Di Marco 2011                 -0.7686 [-1.1600; -0.3772]        0.7
## El-Shmaa 2020                 -0.0770 [-0.5154;  0.3614]        0.7
## El-Tahan 2017                  1.7259 [ 0.8548;  2.5970]        0.6
## Enomoto 2008                   0.1524 [-0.1232;  0.4281]        0.7
## Erden 2010                     1.6399 [ 0.8249;  2.4550]        0.6
## Goksu 2016                    -0.2230 [-0.5440;  0.0981]        0.7
## Gunes 2020                     1.4455 [ 1.1166;  1.7744]        0.7
## Gupta 2020                    -0.1654 [-0.6045;  0.2736]        0.7
## Hamp 2015                      0.3828 [-0.2704;  1.0359]        0.7
## Hirabayashi 2009              -0.7998 [-0.9785; -0.6211]        0.7
## Hirabayashi 2009              -0.4875 [-0.7689; -0.2062]        0.7
## Hirabayashi 2010              -0.1963 [-0.4741;  0.0816]        0.7
## Hsu 2012                      -0.7473 [-1.2719; -0.2226]        0.7
## Hu 2017                       -0.3505 [-0.6328; -0.0683]        0.7
## Ilyas 2014                     0.5321 [ 0.1794;  0.8849]        0.7
## Inangil 2018                  -0.2170 [-0.6870;  0.2530]        0.7
## Jafra 2018                     0.8912 [ 0.6003;  1.1820]        0.7
## Jungbauer 2009                -0.3395 [-0.6187; -0.0602]        0.7
## Khan 2008                      0.2393 [-0.3539;  0.8326]        0.7
## Kido 2015                     -0.6884 [-1.2603; -0.1165]        0.7
## Kim 2013                      -0.8025 [-1.4122; -0.1928]        0.7
## Koh 2010                      -0.9366 [-1.5231; -0.3502]        0.7
## Kucukosman 2020                0.9848 [ 0.4469;  1.5227]        0.7
## Kumar1 2019                   -0.2234 [-0.7586;  0.3118]        0.7
## Kumar2 2019                   -0.2532 [-0.7615;  0.2550]        0.7
## Kunaz 2016                     0.9117 [ 0.4991;  1.3243]        0.7
## Lange 2009                    -0.2569 [-0.7652;  0.2514]        0.7
## Laosuwan 2015                  1.3395 [ 0.3964;  2.2825]        0.6
## Lee 2013                       3.7949 [ 2.7219;  4.8678]        0.6
## Lim 2005                      -0.6018 [-1.1200; -0.0836]        0.7
## Liu 2009                      -0.9750 [-1.4721; -0.4779]        0.7
## Liu 2014                      -1.5736 [-2.0782; -1.0690]        0.7
## Liu 2016                       0.1391 [-0.1559;  0.4341]        0.7
## Liu 2019                      -0.0558 [-0.2624;  0.1508]        0.7
## Maharaj 2006                  -0.0223 [-0.5284;  0.4838]        0.7
## Maharaj 2007                  -0.7376 [-1.3804; -0.0949]        0.7
## Maharaj 2008                  -4.4937 [-5.7019; -3.2855]        0.5
## Mahmood 2015                  -0.9226 [-1.4567; -0.3885]        0.7
## Malik 2008                     1.5011 [ 0.9238;  2.0784]        0.7
## Malik 2008                     1.2009 [ 0.6481;  1.7536]        0.7
## Malik 2008                     0.7352 [ 0.2112;  1.2592]        0.7
## Markham 2019                  -0.0480 [-0.3891;  0.2931]        0.7
## Markham 2019                   0.0346 [-0.2921;  0.3613]        0.7
## Maruyama 2008                  0.9466 [ 0.0945;  1.7987]        0.6
## Maruyama 2008                  0.6037 [-0.2549;  1.4624]        0.6
## Massen 2009                   -1.2676 [-1.6986; -0.8366]        0.7
## Massen 2009                   -0.3644 [-0.7598;  0.0309]        0.7
## Myunghun-Kim 2017              0.9853 [ 0.3251;  1.6455]        0.7
## Ndoko 2008                    -1.6035 [-2.0433; -1.1638]        0.7
## Nishiyama 2011                 0.3338 [-0.1349;  0.8025]        0.7
## Nishiyama 2011                 1.2119 [ 0.6960;  1.7277]        0.7
## Nishiyama 2011                 2.0772 [ 1.4940;  2.6603]        0.7
## Pappu 2020                     1.2111 [ 0.6576;  1.7646]        0.7
## Pappu 2020                     0.0875 [-0.4188;  0.5938]        0.7
## Parasa 2016                    1.9667 [ 1.3434;  2.5901]        0.7
## Pazur 2016                    -0.0433 [-0.5870;  0.5004]        0.7
## Peirovifar 2014               -1.6773 [-2.4080; -0.9465]        0.6
## Pournajafian 2014              1.4975 [ 1.0401;  1.9549]        0.7
## Ranieri 2012                  -1.4154 [-1.7983; -1.0326]        0.7
## Raza 2017                     -1.0102 [-1.5497; -0.4707]        0.7
## Reena 2019                    -0.9104 [-1.3230; -0.4978]        0.7
## Sahajanandan 2019              0.1209 [-0.3735;  0.6153]        0.7
## Sen 2020                       0.7930 [ 0.2661;  1.3200]        0.7
## Serocki 2013                   0.8107 [ 0.2998;  1.3217]        0.7
## Serocki 2013                   0.6989 [ 0.1891;  1.2087]        0.7
## Shah 2016                     -0.3297 [-0.8438;  0.1844]        0.7
## Shim 2016                      0.0298 [-0.4806;  0.5403]        0.7
## Shravanalakshmi 2017           0.2609 [-0.1541;  0.6759]        0.7
## Shravanalakshmi 2017           0.5599 [ 0.1383;  0.9815]        0.7
## Shukla 2017                   -0.5891 [-1.0373; -0.1410]        0.7
## Sulser 2016                    0.0989 [-0.2246;  0.4224]        0.7
## Sun 2005                       1.1310 [ 0.8321;  1.4300]        0.7
## Taylor 2013                    0.8800 [ 0.4415;  1.3185]        0.7
## Teoh 2010                     -0.1424 [-0.4199;  0.1352]        0.7
## Teoh 2010                      0.6017 [ 0.3182;  0.8852]        0.7
## Teoh 2010                      0.6123 [ 0.3286;  0.8960]        0.7
## Toker 2019                    -1.0109 [-1.4281; -0.5936]        0.7
## Tolon 2012                    -0.8071 [-1.4543; -0.1599]        0.7
## Tsan 2020                      0.6173 [ 0.2755;  0.9590]        0.7
## Turkstra 2005                  0.9339 [-0.0524;  1.9202]        0.6
## Van Zundert 2009               0.9677 [ 0.7283;  1.2071]        0.7
## Van Zundert 2009               1.0875 [ 0.8449;  1.3302]        0.7
## Wan 2016                      -0.9701 [-1.4156; -0.5247]        0.7
## Wasem 2013                     0.1881 [-0.3192;  0.6954]        0.7
## Yao 2015                       1.7045 [ 1.2347;  2.1742]        0.7
## Yumul 2016                    -0.5870 [-1.1004; -0.0736]        0.7
## Yumul 2016                    -0.0254 [-0.5274;  0.4766]        0.7
## Yumul 2016                    -0.2101 [-0.7136;  0.2933]        0.7
## Zhao 2014                     -1.2951 [-1.6491; -0.9411]        0.7
## 
## Number of studies combined: k = 149
## 
##                         SMD            95%-CI    z p-value
## Random effects model 0.2397 [ 0.0799; 0.3995] 2.94  0.0033
## Prediction interval         [-1.6463; 2.1257]             
## 
## Quantifying heterogeneity:
##  tau^2 = 0.9041 [1.0553; 1.7503]; tau = 0.9508 [1.0273; 1.3230];
##  I^2 = 94.8% [94.3%; 95.3%]; H = 4.38 [4.17; 4.61]
## 
## Test of heterogeneity:
##        Q d.f. p-value
##  2843.81  148       0
## 
## Details on meta-analytical method:
## - Inverse variance method
## - DerSimonian-Laird estimator for tau^2
## - Jackson method for confidence interval of tau^2 and tau
## - Hedges' g (bias corrected standardised mean difference)
meta::funnel(mcont_funnel.time_random,ref.triangle=TRUE,contour.levels=c(0.9,0.95,0.99),col.contour=c("darkblue","blue","lightblue"))

dmetar::eggers.test(mcont_funnel.time_random)
##              Intercept ConfidenceInterval     t       p
## Egger's test     2.228        0.268-4.188 2.212 0.02847
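
Egger's test indicates small-study asymmetry in the time-to-intubation funnel (intercept 2.23, p = 0.028). As a sensitivity check, Duval and Tweedie's trim-and-fill can show how imputing the presumably missing studies would shift the pooled SMD; a minimal sketch using meta's trimfill() on the object above:

#Trim-and-fill sensitivity analysis for the time funnel (sketch)
tf.time<-meta::trimfill(mcont_funnel.time_random)
summary(tf.time)
meta::funnel(tf.time)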

#Forest Plot for comparative ORs of all outcomes

forest.all<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Table for Overall Forest Plot.csv")
forest.all$dv<-factor(forest.all$dv,levels = c("Failed Intubation","Failed First Attempt","Failed 2 Attempts","Difficult Intubation","POGO","Difficult Laryngoscopy","Time for Intubation"))
forest.all$id<-factor(forest.all$id,levels = c("C-MAC Miller","Pentax AWS Miller","Imago V-Blade","Tosight","APA DAB","KingVision channeled","Macintosh","McGrath series 5","UESCOPE MAC","CEL-100","APA MAC","Glidescope","Truview","Airtraq","Airtraq non-channeled","Pentax AWS","McGrath MAC","KingVision non-channeled","C-MAC D","C-MAC"))
scales_x<-list(`Failed Intubation`=scale_x_continuous(limits = c(-6,6)),
               `Failed First Attempt`=scale_x_continuous(limits = c(-5,5)),
               `Failed 2 Attempts`=scale_x_continuous(limits = c(-21,3)),
               `Difficult Intubation`=scale_x_continuous(limits = c(-6,6)),
               `POGO`=scale_x_continuous(limits = c(-25,75)),
               `Difficult Laryngoscopy`=scale_x_continuous(limits = c(-8,3)),
               `Time for Intubation`=scale_x_continuous(limits = c(-30,30)))
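
Note that facet_grid_sc(), used in the plot below, comes from the facetscales package, which is not on CRAN; if it is missing it can be installed from GitHub (a sketch, assuming the zeehio/facetscales repository):

#facetscales provides per-facet x scales; install from GitHub if needed (sketch)
#remotes::install_github("zeehio/facetscales")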
dat_text<-data.frame(
  label=rep("NA",19),
  dv=c(rep("Failed Intubation",4),"Failed First Attempt",rep("Failed 2 Attempts",5),rep("Difficult Intubation",8),"Difficult Laryngoscopy"),
  x=c(rep(-1.5,4),-1.2,rep(-3,5),rep(-1.5,8),-1.3),
  y=c(1,2,3,4,1,1,4,5,9,15,1,2,4,5,9,10,11,15,4),
  log.ci.l=forest.all$log.ci.l[1:19], #dummy columns so the inherited aes(xmin,xmax) from the main ggplot call can be evaluated
  log.ci.u=forest.all$log.ci.u[1:19]
)
dat_text$dv<-factor(dat_text$dv,levels = c("Failed Intubation","Failed First Attempt","Failed 2 Attempts","Difficult Intubation","Difficult Laryngoscopy"))

dat_text.2<-data.frame(
  label=rep("NA",16),
  dv=c(rep("POGO",10),rep("Time for Intubation",6)),
  x=c(rep(-10,10),rep(-6,6)),
  y=c(1,2,3,4,5,9,10,11,15,16,1,3,5,10,11,15),
  log.ci.l=forest.all$log.ci.l[1:16],
  log.ci.u=forest.all$log.ci.u[1:16]
)
dat_text.2$dv<-factor(dat_text.2$dv,levels = c("POGO","Time for Intubation"))

dat_text.macintosh<-data.frame(
  label=rep("Reference",7),
  dv=c("Failed Intubation","Failed First Attempt","Failed 2 Attempts","Difficult Intubation","POGO","Difficult Laryngoscopy","Time for Intubation"),
  x=c(-3,-2.5,-8,-3,25,-2.7,-15),
  y=rep(7,7),
  log.ci.l=forest.all$log.ci.l[1:7],
  log.ci.u=forest.all$log.ci.u[1:7]
)
dat_text.macintosh$dv<-factor(dat_text.macintosh$dv,levels = c("Failed Intubation","Failed First Attempt","Failed 2 Attempts","Difficult Intubation","POGO","Difficult Laryngoscopy","Time for Intubation"))

arrow<-data.frame(
  dv=c("Failed 2 Attempts"),
  x=c(-21),
  y=c(12),
  log.ci.l=forest.all$log.ci.l[1],
  log.ci.u=forest.all$log.ci.u[1])

base_1<-data.frame(
  label=c("Favours VLs","Log OR","Favours ML","Favours VLs","Log OR","Favours ML","Favours VLs","Log OR","ML","Favours VLs","Log OR","Favours ML","ML","MD","Favours VLs","Favours VLs","Log OR","ML","Favours VLs","MD","Favours ML"),
  dv=rep(c("Failed Intubation","Failed First Attempt","Failed 2 Attempts","Difficult Intubation","POGO","Difficult Laryngoscopy","Time for Intubation"),each=3),
  x=c(-3,0,3,-2.5,0,2.5,-10,-9,1.7,-3,0,3,-13,25,35,-4,-2.5,1.5,-16,0,16),
  y=rep(c(-0.6,-1.5,-0.6),7),
  log.ci.l=forest.all$log.ci.l[1:21],
  log.ci.u=forest.all$log.ci.u[1:21]
)
base_1$dv<-factor(base_1$dv,levels = c("Failed Intubation","Failed First Attempt","Failed 2 Attempts","Difficult Intubation","POGO","Difficult Laryngoscopy","Time for Intubation"))


base_2<-data.frame(
  dv=rep(c("Failed Intubation","Failed First Attempt","Failed 2 Attempts","Difficult Intubation","POGO","Difficult Laryngoscopy","Time for Intubation"),each=3),
  x=c(-6,0,6,-5,0,5,-21,0,3,-6,0,6,-25,0,75,-8,0,3,-30,0,30),
  y=rep(0,21),
  xend=c(-6,0,6,-5,0,5,-21,0,3,-6,0,6,-25,0,75,-8,0,3,-30,0,30),
  yend=rep(-0.2,21),
  log.ci.l=forest.all$log.ci.l[1:21],
  log.ci.u=forest.all$log.ci.u[1:21]
)
base_2$dv<-factor(base_2$dv,levels = c("Failed Intubation","Failed First Attempt","Failed 2 Attempts","Difficult Intubation","POGO","Difficult Laryngoscopy","Time for Intubation"))

base_3<-data.frame(
  dv=c("Failed Intubation","Failed First Attempt","Failed 2 Attempts","Difficult Intubation","POGO","Difficult Laryngoscopy","Time for Intubation"),
  x=c(-6,-5,-21,-6,-25,-8,-30),
  y=rep(0,7),
  xend=c(6,5,3,6,75,3,30),
  yend=rep(0,7),
  log.ci.l=forest.all$log.ci.l[1:7],
  log.ci.u=forest.all$log.ci.u[1:7]
)
base_3$dv<-factor(base_3$dv,levels = c("Failed Intubation","Failed First Attempt","Failed 2 Attempts","Difficult Intubation","POGO","Difficult Laryngoscopy","Time for Intubation"))

base_4<-data.frame(
  label=c("-6","0","6","-5","0","5","-21","0","3","-6","0","6","-25","0","75","-8","0","3","-30","0","30"),
  dv=rep(c("Failed Intubation","Failed First Attempt","Failed 2 Attempts","Difficult Intubation","POGO","Difficult Laryngoscopy","Time for Intubation"),each=3),
  x=c(-6,0,6,-5,0,5,-21,0,3,-6,0,6,-25,0,75,-8,0,3,-30,0,30),
  y=rep(-0.6,21),
  log.ci.l=forest.all$log.ci.l[1:21],
  log.ci.u=forest.all$log.ci.u[1:21]
)
base_4$dv<-factor(base_4$dv,levels = c("Failed Intubation","Failed First Attempt","Failed 2 Attempts","Difficult Intubation","POGO","Difficult Laryngoscopy","Time for Intubation"))

ggplot(data=forest.all,aes(y=id,x=log.pe,xmin=log.ci.l,xmax=log.ci.u,ymin=-2))+
  geom_point(color="blue",shape="square")+
  facet_grid_sc(cols = vars(dv), scales = list(x = scales_x,y="free"))+
  geom_errorbarh(aes(xmin=log.ci.l,xmax=log.ci.u),height=0.3,size=0.3)+
  geom_segment(x=0,y=0,xend=0,yend=21,lty="dashed")+
  ylab("Videolaryngoscopes")+
  geom_text(data=dat_text, mapping=aes(x=x,y=y,label=label),size=2.5)+
  geom_text(data=dat_text.2,mapping = aes(x=x,y=y,label=label),size=2.5)+
  geom_text(data=dat_text.macintosh,mapping = aes(x=x,y=y,label=label),size=3.5,fontface="italic")+
  geom_text(data=base_1,mapping = aes(x=x,y=y,label=label),size=3)+
  geom_segment(data=arrow,y=11,yend=11,x=forest.all$log.pe[44],xend=-22.2,arrow = arrow(length = unit(0.2,"cm"),type = "closed"),size=0.4)+
  geom_segment(data = base_2,mapping = aes(x=x,y=y,xend=xend,yend=yend))+
  geom_segment(data=base_3,mapping=aes(x=x,y=y,xend=xend,yend=yend))+
  geom_text(data=base_4,mapping = aes(x=x,y=y,label=label),size=3)+
  #theme_bw() is a complete theme, so it must come before the theme() overrides that blank the default x axis (which is replaced by the hand-drawn axis above)
  theme_bw()+
  theme(panel.background = element_blank(),
        axis.title.x.bottom = element_blank(),
        axis.line.x.bottom = element_blank(),
        axis.text.x.bottom = element_blank(),
        axis.ticks.x.bottom = element_blank())
## Warning: Removed 7 rows containing missing values (geom_point).
## Warning: Removed 7 rows containing missing values (geom_errorbarh).
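
The composite figure can be exported with ggsave(), which saves the last plot drawn; file name and dimensions below are placeholders:

#Export the overall forest plot (sketch)
ggplot2::ggsave("forest_all_outcomes.pdf", width = 16, height = 9)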

Network meta-analysis of failed intubation for experienced operators

net.fail.experienced<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Analysis Network Failed Intubation Experienced.csv")
sum(net.fail.experienced$sampleSize)
## [1] 11647
table(net.fail.experienced$treatment)
## 
##                  Airtraq    Airtraq_Non_channeled                  APA_DAB 
##                       27                        1                        1 
##                  APA_MAC                  C_MAC_D                C_MAC_MAC 
##                        1                        8                       16 
##                  CEL_100               Glidescope     Kingvision_Channeled 
##                        1                       30                        9 
## Kingvision_Non_channeled                Macintosh              McGrath_MAC 
##                        4                       89                        8 
##         McGrath_Series_5               Pentax_AWS                  Truview 
##                       14                       12                        8 
##                      U_E 
##                        3
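
gemtc's arm-based input (data.ab) expects one row per study arm, with columns study, treatment, responders and sampleSize for a binomial likelihood; a quick head() confirms the layout before the network is built:

#Inspect the arm-based input format (sketch)
head(net.fail.experienced)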
id<-c("Airtraq","Airtraq_Non_channeled","APA_DAB","APA_MAC","C_MAC_D","C_MAC_MAC","CEL_100","Glidescope","Kingvision_Channeled","Kingvision_Non_channeled","Macintosh","McGrath_MAC","McGrath_Series_5","Pentax_AWS","Truview","U_E")
description<-c("Airtraq","Airtraq Non-channeled","APA DAB","APA MAC","C-MAC D","C-MAC","CEL 100","Glidescope","KingVision Channeled","KingVision Non-channeled","Macintosh","McGrath MAC","McGrath Series 5", "Pentax AWS","Truview","UESCOPE MAC")
treat.codes.fail.experienced<-data.frame(id,description)
graph.fail.experienced<-gemtc::mtc.network(data.ab = net.fail.experienced,treatments = treat.codes.fail.experienced)
summary(graph.fail.experienced)
## $Description
## [1] "MTC dataset: Network"
## 
## $`Studies per treatment`
##                  Airtraq    Airtraq_Non_channeled                  APA_DAB 
##                       27                        1                        1 
##                  APA_MAC                  C_MAC_D                C_MAC_MAC 
##                        1                        8                       16 
##                  CEL_100               Glidescope     Kingvision_Channeled 
##                        1                       30                        9 
## Kingvision_Non_channeled                Macintosh              McGrath_MAC 
##                        4                       89                        8 
##         McGrath_Series_5               Pentax_AWS                  Truview 
##                       14                       12                        8 
##                      U_E 
##                        3 
## 
## $`Number of n-arm studies`
## 2-arm 4-arm 
##   114     1 
## 
## $`Studies per treatment comparison`
##                          t1                       t2 nr
## 1                   Airtraq                  APA_DAB  1
## 2                   Airtraq                  C_MAC_D  1
## 3                   Airtraq               Glidescope  3
## 4                   Airtraq     Kingvision_Channeled  2
## 5                   Airtraq                Macintosh 17
## 6                   Airtraq              McGrath_MAC  1
## 7                   Airtraq         McGrath_Series_5  1
## 8                   Airtraq               Pentax_AWS  1
## 9     Airtraq_Non_channeled                Macintosh  1
## 10                  APA_MAC                Macintosh  1
## 11                  C_MAC_D                C_MAC_MAC  1
## 12                  C_MAC_D               Glidescope  1
## 13                  C_MAC_D     Kingvision_Channeled  1
## 14                  C_MAC_D                Macintosh  4
## 15                C_MAC_MAC               Glidescope  2
## 16                C_MAC_MAC Kingvision_Non_channeled  1
## 17                C_MAC_MAC                Macintosh 10
## 18                C_MAC_MAC              McGrath_MAC  1
## 19                C_MAC_MAC         McGrath_Series_5  2
## 20                C_MAC_MAC               Pentax_AWS  1
## 21                  CEL_100                Macintosh  1
## 22               Glidescope     Kingvision_Channeled  1
## 23               Glidescope Kingvision_Non_channeled  1
## 24               Glidescope                Macintosh 19
## 25               Glidescope         McGrath_Series_5  2
## 26               Glidescope               Pentax_AWS  3
## 27     Kingvision_Channeled                Macintosh  5
## 28 Kingvision_Non_channeled                Macintosh  2
## 29                Macintosh              McGrath_MAC  4
## 30                Macintosh         McGrath_Series_5  9
## 31                Macintosh               Pentax_AWS  8
## 32                Macintosh                  Truview  7
## 33                Macintosh                      U_E  3
## 34              McGrath_MAC               Pentax_AWS  1
## 35              McGrath_MAC                  Truview  1
plot(graph.fail.experienced,use.description = TRUE,vertex.shape="circle",vertex.color="white",vertex.size=15,vertex.label.color="dark blue",vertex.label.cex=1.0,edge.color="light blue")

model.fail.experienced <- mtc.model(graph.fail.experienced,likelihood = "binom",link = "logit",linearModel = "random",n.chain = 4)
mcmc.fail.1.experienced <- mtc.run(model.fail.experienced, n.adapt = 50, n.iter = 1000, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 232
##    Unobserved stochastic nodes: 248
##    Total graph size: 4666
## 
## Initializing model
## Warning in rjags::jags.model(file.model, data = syntax[["data"]], inits =
## syntax[["inits"]], : Adaptation incomplete
## NOTE: Stopping adaptation
summary(mcmc.fail.1.experienced)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 60:1050
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 100 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                         Mean     SD Naive SE Time-series SE
## d.Airtraq.APA_DAB                     3.0512 2.2548  0.11274        0.12133
## d.Airtraq.C_MAC_D                    -1.2448 2.0613  0.10306        0.18852
## d.Airtraq.Glidescope                 -0.3795 1.0192  0.05096        0.08109
## d.Airtraq.Kingvision_Channeled        2.0900 1.7375  0.08688        0.14525
## d.Airtraq.Macintosh                   1.8060 0.8948  0.04474        0.08967
## d.Airtraq.McGrath_MAC                -0.2505 1.8687  0.09344        0.12370
## d.Airtraq.McGrath_Series_5            0.2479 1.3540  0.06770        0.08914
## d.Airtraq.Pentax_AWS                 -0.5804 1.4479  0.07239        0.10149
## d.Macintosh.Airtraq_Non_channeled    -1.6042 2.1811  0.10905        0.09924
## d.Macintosh.APA_MAC                  -0.6108 2.3419  0.11710        0.11714
## d.Macintosh.C_MAC_MAC                -2.8755 1.3864  0.06932        0.13263
## d.Macintosh.CEL_100                  -0.5584 2.5480  0.12740        0.11508
## d.Macintosh.Kingvision_Non_channeled -2.6092 2.3862  0.11931        0.15315
## d.Macintosh.Truview                  -0.6248 2.3520  0.11760        0.19955
## d.Macintosh.U_E                      -0.3424 2.2860  0.11430        0.11169
## sd.d                                  2.1820 0.4736  0.02368        0.05721
## 
## 2. Quantiles for each variable:
## 
##                                        2.5%    25%     50%     75%  97.5%
## d.Airtraq.APA_DAB                    -1.471  1.535  3.0151  4.5143 7.4678
## d.Airtraq.C_MAC_D                    -4.947 -2.567 -1.3455  0.2012 2.8266
## d.Airtraq.Glidescope                 -2.370 -1.106 -0.3726  0.3212 1.5666
## d.Airtraq.Kingvision_Channeled       -1.141  0.901  1.9877  3.3527 5.8489
## d.Airtraq.Macintosh                   0.148  1.216  1.8075  2.3607 3.5412
## d.Airtraq.McGrath_MAC                -3.841 -1.398 -0.2992  0.9938 3.4508
## d.Airtraq.McGrath_Series_5           -2.474 -0.682  0.2645  1.0900 3.0100
## d.Airtraq.Pentax_AWS                 -3.632 -1.541 -0.5715  0.4109 2.3203
## d.Macintosh.Airtraq_Non_channeled    -5.927 -2.911 -1.4999 -0.2082 2.3483
## d.Macintosh.APA_MAC                  -5.304 -2.178 -0.6248  1.0148 3.8601
## d.Macintosh.C_MAC_MAC                -5.705 -3.756 -2.8995 -1.8915 0.1399
## d.Macintosh.CEL_100                  -5.665 -2.205 -0.6061  1.1692 4.1303
## d.Macintosh.Kingvision_Non_channeled -7.528 -4.180 -2.4555 -1.0434 2.0872
## d.Macintosh.Truview                  -5.585 -2.158 -0.6492  0.9669 4.0301
## d.Macintosh.U_E                      -4.610 -1.713 -0.1043  0.9233 3.9191
## sd.d                                  1.249  1.857  2.2003  2.5676 2.9451
## 
## -- Model fit (residual deviance):
## 
##      Dbar        pD       DIC 
##  98.87591  76.57004 175.44594 
## 
## 232 data points, ratio 0.4262, I^2 = 0%
mcmc.fail.2.experienced <- mtc.run(model.fail.experienced, n.adapt = 5000, n.iter = 1e5, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 232
##    Unobserved stochastic nodes: 248
##    Total graph size: 4666
## 
## Initializing model
summary(mcmc.fail.2.experienced)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 5010:105000
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 10000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                         Mean     SD Naive SE Time-series SE
## d.Airtraq.APA_DAB                     3.1086 2.1865 0.010933       0.011092
## d.Airtraq.C_MAC_D                    -1.2579 1.9939 0.009969       0.015474
## d.Airtraq.Glidescope                 -0.3860 1.0047 0.005023       0.008552
## d.Airtraq.Kingvision_Channeled        1.9642 1.8170 0.009085       0.017636
## d.Airtraq.Macintosh                   1.8352 0.8805 0.004402       0.009036
## d.Airtraq.McGrath_MAC                -0.2089 1.6981 0.008490       0.010613
## d.Airtraq.McGrath_Series_5            0.3577 1.3269 0.006634       0.009900
## d.Airtraq.Pentax_AWS                 -0.4674 1.4237 0.007119       0.010801
## d.Macintosh.Airtraq_Non_channeled    -1.6167 2.1764 0.010882       0.010882
## d.Macintosh.APA_MAC                  -0.4021 2.1798 0.010899       0.010899
## d.Macintosh.C_MAC_MAC                -2.7899 1.4261 0.007130       0.015073
## d.Macintosh.CEL_100                  -0.5187 2.3818 0.011909       0.012064
## d.Macintosh.Kingvision_Non_channeled -2.6037 2.1697 0.010849       0.012820
## d.Macintosh.Truview                  -0.4208 2.3349 0.011674       0.018863
## d.Macintosh.U_E                      -0.3941 2.1976 0.010988       0.011785
## sd.d                                  2.0915 0.5062 0.002531       0.008312
## 
## 2. Quantiles for each variable:
## 
##                                         2.5%     25%     50%      75%   97.5%
## d.Airtraq.APA_DAB                    -1.3030  1.7401  3.1117  4.46724  7.5673
## d.Airtraq.C_MAC_D                    -5.3509 -2.5256 -1.2106  0.04951  2.5766
## d.Airtraq.Glidescope                 -2.4165 -1.0304 -0.3797  0.26950  1.6018
## d.Airtraq.Kingvision_Channeled       -1.3874  0.7541  1.8563  3.07338  5.9046
## d.Airtraq.Macintosh                   0.2181  1.2397  1.7846  2.38783  3.6820
## d.Airtraq.McGrath_MAC                -3.5299 -1.3106 -0.2341  0.86498  3.2161
## d.Airtraq.McGrath_Series_5           -2.2516 -0.5058  0.3427  1.20288  3.0406
## d.Airtraq.Pentax_AWS                 -3.3218 -1.3935 -0.4592  0.45893  2.3538
## d.Macintosh.Airtraq_Non_channeled    -5.9610 -2.9674 -1.6259 -0.26173  2.7902
## d.Macintosh.APA_MAC                  -4.7813 -1.7730 -0.3997  0.95466  4.0439
## d.Macintosh.C_MAC_MAC                -5.7729 -3.6828 -2.7291 -1.83767 -0.1343
## d.Macintosh.CEL_100                  -5.2699 -2.0576 -0.5156  1.01939  4.2415
## d.Macintosh.Kingvision_Non_channeled -6.9902 -3.9648 -2.5934 -1.23981  1.7338
## d.Macintosh.Truview                  -5.2180 -1.9023 -0.3938  1.10581  4.1386
## d.Macintosh.U_E                      -4.8198 -1.7759 -0.3907  0.97598  4.0025
## sd.d                                  1.0730  1.7296  2.1101  2.49480  2.9305
## 
## -- Model fit (residual deviance):
## 
##      Dbar        pD       DIC 
## 100.35853  76.88663 177.24516 
## 
## 232 data points, ratio 0.4326, I^2 = 0%
gelman.plot(mcmc.fail.1.experienced)

gelman.plot(mcmc.fail.2.experienced)

gelman.diag(mcmc.fail.1.experienced)$mpsrf
## [1] 1.129592
gelman.diag(mcmc.fail.2.experienced)$mpsrf
## [1] 1.001338
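
Both multivariate PSRF values are close to 1 after the long run, supporting convergence. Because the chains are thinned, it is also worth checking how many effectively independent draws remain per parameter; a minimal sketch with coda's effectiveSize(), assuming the posterior draws sit in the $samples slot of the gemtc result:

#Effective sample sizes for the long run (sketch)
coda::effectiveSize(mcmc.fail.2.experienced$samples)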
anohe.fail.experienced<-gemtc::mtc.anohe(graph.fail.experienced,n.adapt = 5000, n.iter = 1e5, thin = 10,sampler="rjags")
summary(anohe.fail.experienced)
nodesplit.fail.experienced <- gemtc::mtc.nodesplit(graph.fail.experienced, linearModel = "random", likelihood = "binom",link = "logit",n.adapt = 5000, n.iter = 1e5, thin = 10)
summary(nodesplit.fail.experienced)
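
The node-split comparison of direct and indirect evidence is easier to read graphically; gemtc can draw the node-split summary in forest style:

#Forest-style display of the node-splitting results (sketch)
plot(summary(nodesplit.fail.experienced))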
forest(gemtc::relative.effect(mcmc.fail.2.experienced, t1 = "Macintosh"), use.description = TRUE,xlim = c(-10, 10))

rank.probability.fail.experienced <- gemtc::rank.probability(mcmc.fail.2.experienced, preferredDirection = -1)
plot(rank.probability.fail.experienced, beside=TRUE)

sucra.fail.experienced <- dmetar::sucra(rank.probability.fail.experienced, lower.is.better = FALSE)
sucra.fail.experienced
##                              SUCRA
## C_MAC_D                  0.7514167
## C_MAC_MAC                0.7421200
## Kingvision_Non_channeled 0.6871750
## Pentax_AWS               0.6731333
## Glidescope               0.6696967
## McGrath_MAC              0.6184283
## Airtraq                  0.5915317
## Airtraq_Non_channeled    0.5470083
## McGrath_Series_5         0.5224500
## CEL_100                  0.3926183
## Truview                  0.3739033
## APA_MAC                  0.3681650
## U_E                      0.3680017
## Kingvision_Channeled     0.2817367
## Macintosh                0.2373450
## APA_DAB                  0.1752700
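
As for the time outcome, the full league table of comparative effects for experienced operators can be exported; a sketch mirroring the earlier relative.effect.table() call (the file name is a placeholder):

#League table of relative effects, experienced operators (sketch)
results.fail.experienced<-gemtc::relative.effect.table(mcmc.fail.2.experienced)
write.csv2(results.fail.experienced,file = "results_fail_experienced.csv")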

Network meta-analysis of failed intubation for non-experienced operators

net.fail.non_experienced<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Analysis Network Failed Intubation Non-experienced.csv")
sum(net.fail.non_experienced$sampleSize)
## [1] 4746
table(net.fail.non_experienced$treatment)
## 
##                  Airtraq                  APA_MAC                C_MAC_MAC 
##                        7                        1                        7 
##               Glidescope     Kingvision_Channeled Kingvision_Non_channeled 
##                       16                        5                        2 
##                Macintosh              McGrath_MAC         McGrath_Series_5 
##                       31                        7                        4 
##               Pentax_AWS                  Truview 
##                        2                        4
id<-c("Airtraq","APA_MAC","C_MAC_MAC","Glidescope","Kingvision_Channeled","Kingvision_Non_channeled","Macintosh","McGrath_MAC","McGrath_Series_5","Pentax_AWS","Truview")
description<-c("Airtraq","APA MAC","C-MAC","Glidescope","KingVision Channeled","KingVision Non-channeled","Macintosh","McGrath MAC","McGrath Series 5", "Pentax AWS","Truview")
treat.codes.fail.non_experienced<-data.frame(id,description)
graph.fail.non_experienced<-gemtc::mtc.network(data.ab = net.fail.non_experienced,treatments = treat.codes.fail.non_experienced)
summary(graph.fail.non_experienced)
## $Description
## [1] "MTC dataset: Network"
## 
## $`Studies per treatment`
##                  Airtraq                  APA_MAC                C_MAC_MAC 
##                        7                        1                        7 
##               Glidescope     Kingvision_Channeled Kingvision_Non_channeled 
##                       16                        5                        2 
##                Macintosh              McGrath_MAC         McGrath_Series_5 
##                       31                        7                        4 
##               Pentax_AWS                  Truview 
##                        2                        4 
## 
## $`Number of n-arm studies`
## 2-arm 3-arm 4-arm 
##    33     4     2 
## 
## $`Studies per treatment comparison`
##                          t1                       t2 nr
## 1                   Airtraq                C_MAC_MAC  1
## 2                   Airtraq               Glidescope  3
## 3                   Airtraq     Kingvision_Channeled  1
## 4                   Airtraq Kingvision_Non_channeled  1
## 5                   Airtraq                Macintosh  5
## 6                   APA_MAC                C_MAC_MAC  1
## 7                   APA_MAC     Kingvision_Channeled  1
## 8                 C_MAC_MAC     Kingvision_Channeled  1
## 9                 C_MAC_MAC                Macintosh  4
## 10                C_MAC_MAC         McGrath_Series_5  1
## 11               Glidescope     Kingvision_Channeled  2
## 12               Glidescope Kingvision_Non_channeled  2
## 13               Glidescope                Macintosh 12
## 14               Glidescope              McGrath_MAC  2
## 15     Kingvision_Channeled Kingvision_Non_channeled  1
## 16     Kingvision_Channeled                Macintosh  2
## 17     Kingvision_Channeled              McGrath_MAC  1
## 18 Kingvision_Non_channeled                Macintosh  1
## 19                Macintosh              McGrath_MAC  4
## 20                Macintosh         McGrath_Series_5  3
## 21                Macintosh               Pentax_AWS  2
## 22                Macintosh                  Truview  4
## 23         McGrath_Series_5                  Truview  2
plot(graph.fail.non_experienced,use.description = TRUE,vertex.shape="circle",vertex.color="white",vertex.size=15,vertex.label.color="dark blue",vertex.label.cex=1.0,edge.color="light blue")

model.fail.non_experienced <- mtc.model(graph.fail.non_experienced,likelihood = "binom",link = "logit",linearModel = "random",n.chain = 4)
mcmc.fail.1.non_experienced <- mtc.run(model.fail.non_experienced, n.adapt = 50, n.iter = 1000, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 86
##    Unobserved stochastic nodes: 97
##    Total graph size: 1861
## 
## Initializing model
## Warning in rjags::jags.model(file.model, data = syntax[["data"]], inits =
## syntax[["inits"]], : Adaptation incomplete
## NOTE: Stopping adaptation
summary(mcmc.fail.1.non_experienced)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 60:1050
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 100 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                         Mean     SD Naive SE Time-series SE
## d.C_MAC_MAC.Airtraq                   1.6669 1.2648  0.06324        0.09316
## d.C_MAC_MAC.APA_MAC                   0.2499 1.7362  0.08681        0.09521
## d.C_MAC_MAC.Kingvision_Channeled      3.7738 1.4156  0.07078        0.09167
## d.C_MAC_MAC.Macintosh                 1.5190 0.8412  0.04206        0.05703
## d.C_MAC_MAC.McGrath_Series_5          3.8948 1.3115  0.06557        0.09063
## d.Macintosh.Glidescope                0.1219 0.8210  0.04105        0.06320
## d.Macintosh.Kingvision_Non_channeled  0.2827 1.7993  0.08996        0.10721
## d.Macintosh.McGrath_MAC              -1.4568 1.4663  0.07332        0.10258
## d.Macintosh.Pentax_AWS               13.3744 7.3529  0.36764        1.51305
## d.Macintosh.Truview                  -1.3742 1.7308  0.08654        0.15983
## sd.d                                  1.4518 0.5889  0.02945        0.06110
## 
## 2. Quantiles for each variable:
## 
##                                          2.5%     25%      50%     75%  97.5%
## d.C_MAC_MAC.Airtraq                  -0.51059  0.8351  1.50719  2.5056  4.435
## d.C_MAC_MAC.APA_MAC                  -3.62938 -0.7473  0.31452  1.3273  3.708
## d.C_MAC_MAC.Kingvision_Channeled      1.08961  2.9263  3.77965  4.5138  6.629
## d.C_MAC_MAC.Macintosh                -0.08856  0.9855  1.49837  2.0476  3.130
## d.C_MAC_MAC.McGrath_Series_5          1.33073  3.0163  3.89284  4.6490  6.716
## d.Macintosh.Glidescope               -1.31562 -0.4248  0.06506  0.6466  1.871
## d.Macintosh.Kingvision_Non_channeled -3.44802 -0.8643  0.31156  1.2370  3.834
## d.Macintosh.McGrath_MAC              -4.69926 -2.3593 -1.36569 -0.5099  1.238
## d.Macintosh.Pentax_AWS                1.29911  6.6836 14.08360 19.2634 26.480
## d.Macintosh.Truview                  -5.00236 -2.3865 -1.35851 -0.2723  2.012
## sd.d                                  0.42413  1.0162  1.37914  1.8408  2.768
## 
## -- Model fit (residual deviance):
## 
##     Dbar       pD      DIC 
## 50.01426 38.76496 88.77923 
## 
## 86 data points, ratio 0.5816, I^2 = 0%
mcmc.fail.2.non_experienced <- mtc.run(model.fail.non_experienced, n.adapt = 5000, n.iter = 1e5, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 86
##    Unobserved stochastic nodes: 97
##    Total graph size: 1861
## 
## Initializing model
summary(mcmc.fail.2.non_experienced)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 5010:105000
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 10000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                          Mean      SD Naive SE Time-series SE
## d.C_MAC_MAC.Airtraq                   1.45507  1.3144 0.006572       0.014236
## d.C_MAC_MAC.APA_MAC                   0.32015  1.7870 0.008935       0.013705
## d.C_MAC_MAC.Kingvision_Channeled      3.62897  1.3142 0.006571       0.009641
## d.C_MAC_MAC.Macintosh                 1.43381  0.8712 0.004356       0.007131
## d.C_MAC_MAC.McGrath_Series_5          3.77059  1.3909 0.006955       0.015238
## d.Macintosh.Glidescope                0.09271  0.8437 0.004218       0.006201
## d.Macintosh.Kingvision_Non_channeled  0.20770  1.7551 0.008776       0.011899
## d.Macintosh.McGrath_MAC              -1.40217  1.5706 0.007853       0.014903
## d.Macintosh.Pentax_AWS               20.24842 14.9158 0.074579       1.665905
## d.Macintosh.Truview                  -1.45845  1.9216 0.009608       0.019528
## sd.d                                  1.38090  0.6791 0.003395       0.014261
## 
## 2. Quantiles for each variable:
## 
##                                         2.5%     25%      50%     75%  97.5%
## d.C_MAC_MAC.Airtraq                  -0.7467  0.5705  1.29823  2.1860  4.527
## d.C_MAC_MAC.APA_MAC                  -3.2685 -0.7872  0.30118  1.3961  3.972
## d.C_MAC_MAC.Kingvision_Channeled      1.1708  2.8121  3.54205  4.3789  6.521
## d.C_MAC_MAC.Macintosh                -0.2523  0.9148  1.38656  1.9219  3.307
## d.C_MAC_MAC.McGrath_Series_5          1.3368  2.8446  3.65752  4.5912  6.839
## d.Macintosh.Glidescope               -1.5403 -0.4179  0.06048  0.5717  1.898
## d.Macintosh.Kingvision_Non_channeled -3.2955 -0.8293  0.18731  1.2411  3.860
## d.Macintosh.McGrath_MAC              -4.9829 -2.2897 -1.26052 -0.3736  1.350
## d.Macintosh.Pentax_AWS                1.6942  8.8252 16.83783 28.4262 55.944
## d.Macintosh.Truview                  -5.6691 -2.5753 -1.34195 -0.2132  2.067
## sd.d                                  0.1634  0.8795  1.32482  1.8430  2.792
## 
## -- Model fit (residual deviance):
## 
##     Dbar       pD      DIC 
## 50.56853 38.49697 89.06551 
## 
## 86 data points, ratio 0.588, I^2 = 0%
gelman.plot(mcmc.fail.1.non_experienced)

gelman.plot(mcmc.fail.2.non_experienced)

gelman.diag(mcmc.fail.1.non_experienced)$mpsrf
## [1] 2.464104
gelman.diag(mcmc.fail.2.non_experienced)$mpsrf
## [1] 1.074111
anohe.fail.non_experienced<-gemtc::mtc.anohe(graph.fail.non_experienced,n.adapt = 5000, n.iter = 1e5, thin = 10,sampler="rjags")
summary(anohe.fail.non_experienced)
nodesplit.fail.non_experienced <- gemtc::mtc.nodesplit(graph.fail.non_experienced, linearModel = "random", likelihood = "binom",link = "logit",n.adapt = 5000, n.iter = 1e5, thin = 10)
summary(nodesplit.fail.non_experienced)
forest(gemtc::relative.effect(mcmc.fail.2.non_experienced, t1 = "Macintosh"), use.description = TRUE,xlim = c(-10, 10))

rank.probability.fail.non_experienced <- gemtc::rank.probability(mcmc.fail.2.non_experienced, preferredDirection = -1)
plot(rank.probability.fail.non_experienced, beside=TRUE)

sucra.fail.non_experienced <- dmetar::sucra(rank.probability.fail.non_experienced, lower.is.better = FALSE)
sucra.fail.non_experienced
##                              SUCRA
## C_MAC_MAC                0.8237750
## McGrath_MAC              0.7661300
## Truview                  0.7558675
## APA_MAC                  0.7241525
## Airtraq                  0.5382100
## Macintosh                0.5147325
## Glidescope               0.5037900
## Kingvision_Non_channeled 0.4985600
## Kingvision_Channeled     0.1822000
## McGrath_Series_5         0.1785475
## Pentax_AWS               0.0140350
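
The Pentax AWS node in this network rests on only two studies, and its posterior (mean log OR around 20 with an SD near 15 and a large time-series SE) is effectively unbounded, so its bottom SUCRA rank should not be over-interpreted. One sensitivity analysis would be to refit the model with a more informative prior on the between-study standard deviation; a sketch using gemtc's mtc.hy.prior(), where the uniform(0, 2) bound is an assumption rather than the value used in the published analysis:

#Sensitivity analysis with a tighter heterogeneity prior (sketch)
model.fail.non_experienced.sens<-mtc.model(graph.fail.non_experienced,likelihood = "binom",link = "logit",linearModel = "random",n.chain = 4,hy.prior = mtc.hy.prior("std.dev","dunif",0,2))
mcmc.fail.sens<-mtc.run(model.fail.non_experienced.sens,n.adapt = 5000,n.iter = 1e5,thin = 10)
summary(mcmc.fail.sens)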

Network meta-analysis of failed intubation for predicted difficult airway

net.fail.predicted.2<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Analysis Network Failed Intubation Predicted.csv")
sum(net.fail.predicted.2$sampleSize)
## [1] 6336
table(net.fail.predicted.2$treatment)
## 
##                  Airtraq    Airtraq_Non_channeled                  APA_DAB 
##                       18                        1                        1 
##                  APA_MAC                    C_MAC                  C_MAC_D 
##                        1                        9                        6 
##               Glidescope     Kingvision_Channeled Kingvision_Non_channeled 
##                       19                        4                        4 
##                Macintosh              McGrath_MAC         McGrath_Series_5 
##                       48                        3                        6 
##               Pentax_AWS                  Truview 
##                        9                        4
id<-c("Airtraq","Airtraq_Non_channeled","APA_DAB","APA_MAC","C_MAC","C_MAC_D","Glidescope","Kingvision_Channeled","Kingvision_Non_channeled","Macintosh","McGrath_MAC","McGrath_Series_5","Pentax_AWS","Truview")
description<-c("Airtraq","Airtraq Non-channeled","APA DAB","APA MAC","C-MAC","C-MAC D","Glidescope","KingVision Channeled","KingVision Non-channeled","Macintosh","McGrath MAC","McGrath Series 5", "Pentax AWS","Truview")
treat.codes.fail.predicted.2<-data.frame(id,description)
graph.fail.predicted.2<-gemtc::mtc.network(data.ab = net.fail.predicted.2,treatments = treat.codes.fail.predicted.2)
## Warning in remove.onearm(data.ab, warn = TRUE): Removing 1 one-arm studies:
## Bakshi_3 2015
summary(graph.fail.predicted.2)
## $Description
## [1] "MTC dataset: Network"
## 
## $`Studies per treatment`
##                  Airtraq    Airtraq_Non_channeled                  APA_DAB 
##                       18                        1                        1 
##                  APA_MAC                    C_MAC                  C_MAC_D 
##                        1                        9                        6 
##               Glidescope     Kingvision_Channeled Kingvision_Non_channeled 
##                       19                        4                        4 
##                Macintosh              McGrath_MAC         McGrath_Series_5 
##                       47                        3                        6 
##               Pentax_AWS                  Truview 
##                        9                        4 
## 
## $`Number of n-arm studies`
## 2-arm 3-arm 4-arm 6-arm 
##    49     8     1     1 
## 
## $`Studies per treatment comparison`
##                          t1                       t2 nr
## 1                   Airtraq                  APA_DAB  1
## 2                   Airtraq                    C_MAC  1
## 3                   Airtraq                  C_MAC_D  1
## 4                   Airtraq               Glidescope  4
## 5                   Airtraq     Kingvision_Channeled  2
## 6                   Airtraq                Macintosh 14
## 7                   Airtraq              McGrath_MAC  1
## 8     Airtraq_Non_channeled                  APA_MAC  1
## 9     Airtraq_Non_channeled Kingvision_Non_channeled  1
## 10    Airtraq_Non_channeled                Macintosh  1
## 11                  APA_DAB                  C_MAC_D  1
## 12                  APA_DAB               Glidescope  1
## 13                  APA_DAB     Kingvision_Channeled  1
## 14                  APA_DAB              McGrath_MAC  1
## 15                  APA_MAC Kingvision_Non_channeled  1
## 16                  APA_MAC                Macintosh  1
## 17                    C_MAC                  C_MAC_D  1
## 18                    C_MAC               Glidescope  2
## 19                    C_MAC Kingvision_Non_channeled  1
## 20                    C_MAC                Macintosh  6
## 21                    C_MAC              McGrath_MAC  1
## 22                  C_MAC_D               Glidescope  3
## 23                  C_MAC_D     Kingvision_Channeled  1
## 24                  C_MAC_D Kingvision_Non_channeled  1
## 25                  C_MAC_D                Macintosh  3
## 26                  C_MAC_D              McGrath_MAC  1
## 27               Glidescope     Kingvision_Channeled  2
## 28               Glidescope Kingvision_Non_channeled  1
## 29               Glidescope                Macintosh 11
## 30               Glidescope              McGrath_MAC  1
## 31               Glidescope         McGrath_Series_5  1
## 32               Glidescope               Pentax_AWS  3
## 33     Kingvision_Channeled Kingvision_Non_channeled  1
## 34     Kingvision_Channeled                Macintosh  1
## 35     Kingvision_Channeled              McGrath_MAC  1
## 36 Kingvision_Non_channeled                Macintosh  2
## 37                Macintosh         McGrath_Series_5  5
## 38                Macintosh               Pentax_AWS  8
## 39                Macintosh                  Truview  3
## 40              McGrath_MAC                  Truview  1
plot(graph.fail.predicted.2,use.description = TRUE,vertex.shape="circle",vertex.color="white",vertex.size=15,vertex.label.color="dark blue",vertex.label.cex=1.0,edge.color="light blue")

model.fail.predicted.2 <- mtc.model(graph.fail.predicted.2,likelihood = "binom",link = "logit",linearModel = "random",n.chain = 4)
mcmc.fail.1.predicted.2 <- mtc.run(model.fail.predicted.2, n.adapt = 50, n.iter = 1000, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 132
##    Unobserved stochastic nodes: 146
##    Total graph size: 2817
## 
## Initializing model
## Warning in rjags::jags.model(file.model, data = syntax[["data"]], inits =
## syntax[["inits"]], : Adaptation incomplete
## NOTE: Stopping adaptation
summary(mcmc.fail.1.predicted.2)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 60:1050
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 100 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                          Mean     SD Naive SE Time-series SE
## d.Airtraq.APA_DAB                      4.0601  2.249  0.11244        0.11233
## d.Airtraq.C_MAC                        2.4934  1.808  0.09042        0.16735
## d.Airtraq.C_MAC_D                     -0.2134  2.214  0.11069        0.20991
## d.Airtraq.Glidescope                   0.9389  1.153  0.05763        0.07327
## d.Airtraq.Kingvision_Channeled         1.6472  1.673  0.08367        0.10873
## d.Airtraq.Macintosh                    3.8559  1.196  0.05979        0.12317
## d.Airtraq.McGrath_MAC                 -1.0737  2.486  0.12431        0.12426
## d.Macintosh.Airtraq_Non_channeled     -1.6013  2.376  0.11878        0.10920
## d.Macintosh.APA_MAC                   -0.5542  2.520  0.12600        0.12279
## d.Macintosh.Kingvision_Non_channeled  -3.0147  1.883  0.09413        0.09307
## d.Macintosh.McGrath_Series_5          -1.9692  1.573  0.07864        0.08932
## d.Macintosh.Pentax_AWS                -1.4606  1.265  0.06324        0.07594
## d.Macintosh.Truview                  -14.8937 23.443  1.17216        4.30550
## sd.d                                   2.5695  0.630  0.03150        0.09965
## 
## 2. Quantiles for each variable:
## 
##                                          2.5%      25%      50%      75%
## d.Airtraq.APA_DAB                     -0.7892   2.6723   4.1106  5.50409
## d.Airtraq.C_MAC                       -0.6588   1.2446   2.3644  3.60382
## d.Airtraq.C_MAC_D                     -4.5678  -1.6780  -0.1027  1.19376
## d.Airtraq.Glidescope                  -1.5118   0.1622   0.9495  1.68584
## d.Airtraq.Kingvision_Channeled        -1.3962   0.5322   1.6435  2.69728
## d.Airtraq.Macintosh                    1.8619   3.0043   3.7609  4.61906
## d.Airtraq.McGrath_MAC                 -6.0816  -2.6874  -1.0550  0.50661
## d.Macintosh.Airtraq_Non_channeled     -6.0586  -3.1505  -1.6484 -0.02144
## d.Macintosh.APA_MAC                   -5.3974  -2.1855  -0.4536  0.93472
## d.Macintosh.Kingvision_Non_channeled  -6.7502  -4.2269  -2.9423 -1.57374
## d.Macintosh.McGrath_Series_5          -5.3233  -3.0339  -1.9592 -0.90830
## d.Macintosh.Pentax_AWS                -4.4510  -2.2067  -1.3895 -0.63481
## d.Macintosh.Truview                  -69.2364 -26.7886 -13.3197  2.91463
## sd.d                                   1.4945   2.0791   2.5421  2.95909
##                                        97.5%
## d.Airtraq.APA_DAB                     8.1655
## d.Airtraq.C_MAC                       6.5037
## d.Airtraq.C_MAC_D                     4.5316
## d.Airtraq.Glidescope                  3.0556
## d.Airtraq.Kingvision_Channeled        4.6757
## d.Airtraq.Macintosh                   6.3467
## d.Airtraq.McGrath_MAC                 3.6357
## d.Macintosh.Airtraq_Non_channeled     3.0419
## d.Macintosh.APA_MAC                   4.4132
## d.Macintosh.Kingvision_Non_channeled  0.2036
## d.Macintosh.McGrath_Series_5          1.0228
## d.Macintosh.Pentax_AWS                0.8511
## d.Macintosh.Truview                  20.3329
## sd.d                                  3.8669
## 
## -- Model fit (residual deviance):
## 
##      Dbar        pD       DIC 
##  72.92300  57.23641 130.15941 
## 
## 132 data points, ratio 0.5524, I^2 = 0%
mcmc.fail.2.predicted.2 <- mtc.run(model.fail.predicted.2, n.adapt = 5000, n.iter = 1e5, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 132
##    Unobserved stochastic nodes: 146
##    Total graph size: 2817
## 
## Initializing model
summary(mcmc.fail.2.predicted.2)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 5010:105000
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 10000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                          Mean      SD Naive SE Time-series SE
## d.Airtraq.APA_DAB                      3.7443  2.4378 0.012189       0.014801
## d.Airtraq.C_MAC                        2.4617  1.8832 0.009416       0.023423
## d.Airtraq.C_MAC_D                     -0.7468  2.3273 0.011636       0.023596
## d.Airtraq.Glidescope                   0.9990  1.2918 0.006459       0.013342
## d.Airtraq.Kingvision_Channeled         1.6405  1.9775 0.009888       0.013530
## d.Airtraq.Macintosh                    3.9744  1.3449 0.006725       0.022687
## d.Airtraq.McGrath_MAC                 -0.9879  2.5382 0.012691       0.015854
## d.Macintosh.Airtraq_Non_channeled     -1.8877  2.6004 0.013002       0.013350
## d.Macintosh.APA_MAC                   -0.6649  2.5966 0.012983       0.013253
## d.Macintosh.Kingvision_Non_channeled  -3.1261  1.9899 0.009950       0.011652
## d.Macintosh.McGrath_Series_5          -2.1094  1.6583 0.008292       0.009449
## d.Macintosh.Pentax_AWS                -1.5494  1.2786 0.006393       0.007931
## d.Macintosh.Truview                  -44.7608 44.7920 0.223960       3.895587
## sd.d                                   2.6719  0.6794 0.003397       0.011548
## 
## 2. Quantiles for each variable:
## 
##                                           2.5%      25%      50%      75%
## d.Airtraq.APA_DAB                      -1.0759   2.2093   3.7052   5.2636
## d.Airtraq.C_MAC                        -0.9717   1.1948   2.3484   3.6019
## d.Airtraq.C_MAC_D                      -5.4842  -2.2040  -0.7241   0.7443
## d.Airtraq.Glidescope                   -1.4630   0.1569   0.9485   1.8000
## d.Airtraq.Kingvision_Channeled         -2.1887   0.3736   1.5922   2.8708
## d.Airtraq.Macintosh                     1.6802   3.0345   3.8533   4.7773
## d.Airtraq.McGrath_MAC                  -6.0410  -2.6139  -1.0104   0.6321
## d.Macintosh.Airtraq_Non_channeled      -7.2161  -3.4887  -1.8648  -0.2391
## d.Macintosh.APA_MAC                    -5.9649  -2.2835  -0.6376   0.9866
## d.Macintosh.Kingvision_Non_channeled   -7.2358  -4.3568  -3.0799  -1.8557
## d.Macintosh.McGrath_Series_5           -5.6015  -3.1359  -2.0469  -1.0344
## d.Macintosh.Pentax_AWS                 -4.2112  -2.3447  -1.5027  -0.7167
## d.Macintosh.Truview                  -150.2843 -72.4009 -35.8708 -11.5823
## sd.d                                    1.4900   2.1602   2.6200   3.1514
##                                        97.5%
## d.Airtraq.APA_DAB                     8.6909
## d.Airtraq.C_MAC                       6.5475
## d.Airtraq.C_MAC_D                     3.8454
## d.Airtraq.Glidescope                  3.7000
## d.Airtraq.Kingvision_Channeled        5.7405
## d.Airtraq.Macintosh                   6.9848
## d.Airtraq.McGrath_MAC                 4.0755
## d.Macintosh.Airtraq_Non_channeled     3.2349
## d.Macintosh.APA_MAC                   4.5018
## d.Macintosh.Kingvision_Non_channeled  0.7125
## d.Macintosh.McGrath_Series_5          1.0502
## d.Macintosh.Pentax_AWS                0.8765
## d.Macintosh.Truview                  21.9212
## sd.d                                  4.0535
## 
## -- Model fit (residual deviance):
## 
##      Dbar        pD       DIC 
##  71.93959  57.13558 129.07517 
## 
## 132 data points, ratio 0.545, I^2 = 0%
gelman.plot(mcmc.fail.1.predicted.2)

gelman.plot(mcmc.fail.2.predicted.2)

gelman.diag(mcmc.fail.1.predicted.2)$mpsrf
## [1] 3.997298
gelman.diag(mcmc.fail.2.predicted.2)$mpsrf
## [1] 1.024076
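
The pilot run's multivariate PSRF of roughly 4.0 confirms it has not converged, whereas the full run's 1.02 is below the conventional 1.05 threshold. Per-parameter diagnostics can be checked as well; a minimal sketch, assuming the mtc.result object exposes its coda chains in $samples, as in current gemtc versions:

#Per-parameter potential scale reduction factors for the full run
gelman.diag(mcmc.fail.2.predicted.2)$psrf
#Effective sample sizes; very low values flag poorly mixing contrasts such as d.Macintosh.Truview
coda::effectiveSize(mcmc.fail.2.predicted.2$samples)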
anohe.fail.predicted.2<-gemtc::mtc.anohe(graph.fail.predicted.2,n.adapt = 5000, n.iter = 1e5, thin = 10,sampler="rjags")
summary(anohe.fail.predicted.2)
nodesplit.fail.predicted.2 <- gemtc::mtc.nodesplit(graph.fail.predicted.2, linearModel = "random", likelihood = "binom",link = "logit",n.adapt = 5000, n.iter = 1e5, thin = 10)
summary(nodesplit.fail.predicted.2)
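
The node-split results can also be inspected graphically; gemtc's plot method for the node-split summary draws the direct, indirect, and network estimates side by side for each split comparison:

plot(summary(nodesplit.fail.predicted.2))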
forest(gemtc::relative.effect(mcmc.fail.2.predicted.2, t1 = "Macintosh"), use.description = TRUE,xlim = c(-10, 10))

rank.probability.fail.predicted.2 <- gemtc::rank.probability(mcmc.fail.2.predicted.2, preferredDirection = -1)
plot(rank.probability.fail.predicted.2, beside=TRUE)

sucra.fail.predicted.2 <- dmetar::sucra(rank.probability.fail.predicted.2, lower.is.better = FALSE)
sucra.fail.predicted.2
##                              SUCRA
## Truview                  0.8371269
## McGrath_MAC              0.7724038
## C_MAC_D                  0.7617077
## Airtraq                  0.7244942
## Kingvision_Non_channeled 0.5996481
## Glidescope               0.5832519
## Kingvision_Channeled     0.4726308
## McGrath_Series_5         0.4532673
## Airtraq_Non_channeled    0.4302173
## Pentax_AWS               0.3657135
## C_MAC                    0.3656673
## APA_MAC                  0.2792365
## APA_DAB                  0.2197346
## Macintosh                0.1349000
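
For reporting, the pooled log-odds ratios against the Macintosh can be exponentiated into odds ratios with 95% credible intervals. A minimal sketch, assuming summary() on the relative-effect object returns the usual coda quantile matrix under $summaries; the heterogeneity parameter sd.d is dropped because it is not a contrast:

re.mac.predicted<-gemtc::relative.effect(mcmc.fail.2.predicted.2,t1 = "Macintosh")
q.predicted<-summary(re.mac.predicted)$summaries$quantiles
#Odds ratios (median and 95% CrI) of each device versus the Macintosh
round(exp(q.predicted[grep("^d\\.",rownames(q.predicted)),c("2.5%","50%","97.5%")]),2)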

Network meta-analysis of failed intubation for non-predicted difficult airway

net.fail.non_predicted.2<-read.csv2("~/Desktop/SR Video/Tables for Analyses/Ultimate Tables for Publication/Table Analysis Network Failed Intubation Non-predicted.csv")
sum(net.fail.non_predicted.2$sampleSize)
## [1] 7959
table(net.fail.non_predicted.2$treatment)
## 
##                  Airtraq                  APA_MAC                    C_MAC 
##                       14                        1                       14 
##                  C_MAC_D                  CEL_100               Glidescope 
##                        4                        1                       30 
##     Kingvision_Channeled Kingvision_Non_channeled                Macintosh 
##                        9                        2                       61 
##              McGrath_MAC         McGrath_Series_5               Pentax_AWS 
##                       14                       12                        6 
##                  Truview 
##                        5
id<-c("Airtraq","APA_MAC","C_MAC","C_MAC_D","CEL_100","Glidescope","Kingvision_Channeled","Kingvision_Non_channeled","Macintosh","McGrath_MAC","McGrath_Series_5","Pentax_AWS","Truview")
description<-c("Airtraq","APA MAC","C-MAC","C-MAC D","CEL 100","Glidescope","KingVision Channeled","KingVision Non-channeled","Macintosh","McGrath MAC","McGrath Series 5", "Pentax AWS","Truview")
treat.codes.fail.non_predicted.2<-data.frame(id,description)
graph.fail.non_predicted.2<-gemtc::mtc.network(data.ab = net.fail.non_predicted.2,treatments = treat.codes.fail.non_predicted.2)
summary(graph.fail.non_predicted.2)
## $Description
## [1] "MTC dataset: Network"
## 
## $`Studies per treatment`
##                  Airtraq                  APA_MAC                    C_MAC 
##                       14                        1                       14 
##                  C_MAC_D                  CEL_100               Glidescope 
##                        4                        1                       30 
##     Kingvision_Channeled Kingvision_Non_channeled                Macintosh 
##                        9                        2                       61 
##              McGrath_MAC         McGrath_Series_5               Pentax_AWS 
##                       14                       12                        6 
##                  Truview 
##                        5 
## 
## $`Number of n-arm studies`
## 2-arm 3-arm 4-arm 
##    67     9     3 
## 
## $`Studies per treatment comparison`
##                          t1                       t2 nr
## 1                   Airtraq                    C_MAC  1
## 2                   Airtraq               Glidescope  3
## 3                   Airtraq     Kingvision_Channeled  1
## 4                   Airtraq Kingvision_Non_channeled  1
## 5                   Airtraq                Macintosh  9
## 6                   Airtraq         McGrath_Series_5  2
## 7                   Airtraq               Pentax_AWS  1
## 8                   APA_MAC                    C_MAC  1
## 9                   APA_MAC     Kingvision_Channeled  1
## 10                    C_MAC               Glidescope  3
## 11                    C_MAC     Kingvision_Channeled  2
## 12                    C_MAC                Macintosh  9
## 13                    C_MAC              McGrath_MAC  1
## 14                    C_MAC         McGrath_Series_5  3
## 15                    C_MAC               Pentax_AWS  1
## 16                  C_MAC_D               Glidescope  1
## 17                  C_MAC_D     Kingvision_Channeled  1
## 18                  C_MAC_D                Macintosh  3
## 19                  CEL_100                Macintosh  1
## 20               Glidescope     Kingvision_Channeled  2
## 21               Glidescope Kingvision_Non_channeled  2
## 22               Glidescope                Macintosh 21
## 23               Glidescope              McGrath_MAC  3
## 24               Glidescope         McGrath_Series_5  3
## 25               Glidescope               Pentax_AWS  2
## 26     Kingvision_Channeled                Macintosh  5
## 27     Kingvision_Channeled              McGrath_MAC  1
## 28 Kingvision_Non_channeled                Macintosh  1
## 29                Macintosh              McGrath_MAC 10
## 30                Macintosh         McGrath_Series_5  6
## 31                Macintosh               Pentax_AWS  3
## 32                Macintosh                  Truview  5
## 33              McGrath_MAC               Pentax_AWS  1
## 34              McGrath_MAC                  Truview  1
## 35         McGrath_Series_5                  Truview  1
plot(graph.fail.non_predicted.2,use.description = TRUE,vertex.shape="circle",vertex.color="white",vertex.size=15,vertex.label.color="dark blue",vertex.label.cex=1.0,edge.color="light blue")

model.fail.non_predicted.2 <- mtc.model(graph.fail.non_predicted.2,likelihood = "binom",link = "logit",linearModel = "random",n.chain = 4)
mcmc.fail.1.non_predicted.2 <- mtc.run(model.fail.non_predicted.2, n.adapt = 50, n.iter = 1000, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 173
##    Unobserved stochastic nodes: 186
##    Total graph size: 3537
## 
## Initializing model
## Warning in rjags::jags.model(file.model, data = syntax[["data"]], inits =
## syntax[["inits"]], : Adaptation incomplete
## NOTE: Stopping adaptation
summary(mcmc.fail.1.non_predicted.2)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 60:1050
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 100 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                         Mean      SD Naive SE Time-series SE
## d.C_MAC.Airtraq                       3.2329  1.5123  0.07562        0.19069
## d.C_MAC.APA_MAC                       1.1758  1.9527  0.09763        0.14041
## d.C_MAC.Glidescope                    2.3181  1.4431  0.07216        0.20065
## d.C_MAC.Kingvision_Channeled          5.1346  1.4822  0.07411        0.14311
## d.C_MAC.Macintosh                     3.0354  1.1762  0.05881        0.15191
## d.C_MAC.McGrath_MAC                   2.0300  1.5298  0.07649        0.16888
## d.C_MAC.McGrath_Series_5              3.1194  1.3329  0.06665        0.14724
## d.C_MAC.Pentax_AWS                   -9.4908  6.1197  0.30598        1.07785
## d.Macintosh.C_MAC_D                  -5.5893  4.9617  0.24808        1.11281
## d.Macintosh.CEL_100                  -0.6285  1.9959  0.09980        0.10505
## d.Macintosh.Kingvision_Non_channeled -7.6874 19.7580  0.98790        5.37556
## d.Macintosh.Truview                  -2.0751  1.9667  0.09834        0.12487
## sd.d                                  1.5137  0.6147  0.03074        0.07996
## 
## 2. Quantiles for each variable:
## 
##                                          2.5%      25%     50%     75%  97.5%
## d.C_MAC.Airtraq                        0.5898   2.0263  3.1712  4.3461  6.142
## d.C_MAC.APA_MAC                       -2.3701  -0.1674  1.0464  2.3995  5.016
## d.C_MAC.Glidescope                    -0.1268   1.3047  2.1733  3.2705  5.494
## d.C_MAC.Kingvision_Channeled           2.6021   4.0084  5.0755  6.1055  8.412
## d.C_MAC.Macintosh                      1.0313   2.1206  3.0113  3.8765  5.609
## d.C_MAC.McGrath_MAC                   -1.2794   1.1329  2.0379  3.0866  4.748
## d.C_MAC.McGrath_Series_5               0.7718   2.1684  2.9989  3.9330  5.930
## d.C_MAC.Pentax_AWS                   -22.4386 -13.8448 -9.0169 -5.1581  2.257
## d.Macintosh.C_MAC_D                  -17.5578  -8.6433 -4.6543 -2.0991  2.183
## d.Macintosh.CEL_100                   -4.6942  -1.8587 -0.8051  0.6907  3.366
## d.Macintosh.Kingvision_Non_channeled -46.6796 -21.8347 -7.3105  6.0588 27.438
## d.Macintosh.Truview                   -6.0043  -3.1964 -1.9287 -0.8129  1.716
## sd.d                                   0.3958   1.1045  1.4672  1.8614  2.827
## 
## -- Model fit (residual deviance):
## 
##      Dbar        pD       DIC 
##  69.75033  50.51614 120.26648 
## 
## 173 data points, ratio 0.4032, I^2 = 0%
mcmc.fail.2.non_predicted.2 <- mtc.run(model.fail.non_predicted.2, n.adapt = 5000, n.iter = 1e5, thin = 10)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 173
##    Unobserved stochastic nodes: 186
##    Total graph size: 3537
## 
## Initializing model
summary(mcmc.fail.2.non_predicted.2)
## 
## Results on the Log Odds Ratio scale
## 
## Iterations = 5010:105000
## Thinning interval = 10 
## Number of chains = 4 
## Sample size per chain = 10000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                                         Mean      SD Naive SE Time-series SE
## d.C_MAC.Airtraq                        3.287  1.5035 0.007517        0.02486
## d.C_MAC.APA_MAC                        1.391  2.0277 0.010138        0.01636
## d.C_MAC.Glidescope                     2.378  1.4188 0.007094        0.02298
## d.C_MAC.Kingvision_Channeled           5.398  1.6518 0.008259        0.02078
## d.C_MAC.Macintosh                      3.057  1.2293 0.006146        0.02224
## d.C_MAC.McGrath_MAC                    2.053  1.5609 0.007804        0.02018
## d.C_MAC.McGrath_Series_5               3.130  1.4010 0.007005        0.02143
## d.C_MAC.Pentax_AWS                   -35.242 27.1691 0.135845        2.80983
## d.Macintosh.C_MAC_D                  -10.497 10.1647 0.050824        0.53355
## d.Macintosh.CEL_100                   -0.507  2.0281 0.010140        0.01086
## d.Macintosh.Kingvision_Non_channeled -23.644 33.2385 0.166192        2.60951
## d.Macintosh.Truview                   -2.094  2.0905 0.010453        0.01746
## sd.d                                   1.645  0.5835 0.002918        0.01123
## 
## 2. Quantiles for each variable:
## 
##                                          2.5%       25%      50%       75%
## d.C_MAC.Airtraq                        0.6976   2.23278   3.1600   4.19592
## d.C_MAC.APA_MAC                       -2.5771   0.09272   1.3650   2.66482
## d.C_MAC.Glidescope                    -0.2157   1.43076   2.2960   3.24159
## d.C_MAC.Kingvision_Channeled           2.5190   4.27000   5.2633   6.38015
## d.C_MAC.Macintosh                      0.8936   2.21580   2.9643   3.78370
## d.C_MAC.McGrath_MAC                   -0.9936   1.04820   2.0149   3.03013
## d.C_MAC.McGrath_Series_5               0.6298   2.17267   3.0169   3.97584
## d.C_MAC.Pentax_AWS                   -97.9582 -51.65964 -29.7445 -13.44091
## d.Macintosh.C_MAC_D                  -38.5908 -14.91442  -8.0192  -3.44025
## d.Macintosh.CEL_100                   -4.6341  -1.75186  -0.4988   0.75057
## d.Macintosh.Kingvision_Non_channeled -92.1609 -45.65523 -21.3575   0.03648
## d.Macintosh.Truview                   -6.5236  -3.36960  -2.0132  -0.73881
## sd.d                                   0.6322   1.21092   1.5958   2.05175
##                                        97.5%
## d.C_MAC.Airtraq                       6.6358
## d.C_MAC.APA_MAC                       5.5463
## d.C_MAC.Glidescope                    5.4262
## d.C_MAC.Kingvision_Channeled          9.0422
## d.C_MAC.Macintosh                     5.7763
## d.C_MAC.McGrath_MAC                   5.2735
## d.C_MAC.McGrath_Series_5              6.1720
## d.C_MAC.Pentax_AWS                   -0.1398
## d.Macintosh.C_MAC_D                   1.9218
## d.Macintosh.CEL_100                   3.5392
## d.Macintosh.Kingvision_Non_channeled 36.6400
## d.Macintosh.Truview                   1.8651
## sd.d                                  2.8377
## 
## -- Model fit (residual deviance):
## 
##      Dbar        pD       DIC 
##  68.51861  50.98753 119.50614 
## 
## 173 data points, ratio 0.3961, I^2 = 0%
gelman.plot(mcmc.fail.1.non_predicted.2)

gelman.plot(mcmc.fail.2.non_predicted.2)

gelman.diag(mcmc.fail.1.non_predicted.2)$mpsrf
## [1] 3.767329
gelman.diag(mcmc.fail.2.non_predicted.2)$mpsrf
## [1] 1.099578
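
Several contrasts in this network are poorly identified (note the posterior SDs of 27.2 for d.C_MAC.Pentax_AWS and 33.2 for d.Macintosh.Kingvision_Non_channeled), which typically reflects very sparse event data on those comparisons. Trace and density plots of the full run make such parameters easy to spot:

#Trace and density plots for every parameter of the converged run
plot(mcmc.fail.2.non_predicted.2)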
anohe.fail.non_predicted.2<-gemtc::mtc.anohe(graph.fail.non_predicted.2,n.adapt = 5000, n.iter = 1e5, thin = 10,sampler="rjags")
summary(anohe.fail.non_predicted.2)
nodesplit.fail.non_predicted.2 <- gemtc::mtc.nodesplit(graph.fail.non_predicted.2, linearModel = "random", likelihood = "binom",link = "logit",n.adapt = 5000, n.iter = 1e5, thin = 10)
summary(nodesplit.fail.non_predicted.2)
forest(gemtc::relative.effect(mcmc.fail.2.non_predicted.2, t1 = "Macintosh"), use.description = TRUE,xlim = c(-10, 10))

rank.probability.fail.non_predicted.2 <- gemtc::rank.probability(mcmc.fail.2.non_predicted.2, preferredDirection = -1)
plot(rank.probability.fail.non_predicted.2, beside=TRUE)

sucra.fail.non_predicted.2 <- dmetar::sucra(rank.probability.fail.non_predicted.2, lower.is.better = FALSE)
sucra.fail.non_predicted.2
##                               SUCRA
## Pentax_AWS               0.95131458
## C_MAC_D                  0.78107292
## C_MAC                    0.72531667
## Kingvision_Non_channeled 0.70006458
## Truview                  0.58844375
## APA_MAC                  0.53638333
## McGrath_MAC              0.47173542
## Glidescope               0.43473542
## CEL_100                  0.39466875
## Macintosh                0.29297500
## McGrath_Series_5         0.29246250
## Airtraq                  0.26905000
## Kingvision_Channeled     0.06177708
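
Finally, the two SUCRA rankings can be set side by side to see how device performance shifts between predicted and non-predicted difficult airways (Pentax AWS, for instance, moves from 0.37 to 0.95). A minimal sketch, assuming dmetar::sucra returns a one-column data frame with treatments as row names, as its printed output suggests:

sucra.comparison<-merge(
  data.frame(treatment=rownames(sucra.fail.predicted.2),sucra.predicted=sucra.fail.predicted.2$SUCRA),
  data.frame(treatment=rownames(sucra.fail.non_predicted.2),sucra.non_predicted=sucra.fail.non_predicted.2$SUCRA),
  by="treatment",all = TRUE)
sucra.comparison[order(-sucra.comparison$sucra.non_predicted),]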