### This script first takes the aggregate data from the Giustina et al.
### experiment, analysed using a fixed grid of coincidence windows of width
### 50 time-stamp units = 1 microsecond, and then estimates the four 2x2
### tables of outcome probabilities under the assumption of no-signalling.
### The estimated marginal distributions therefore match exactly across
### settings, while each 2x2 table keeps the log odds ratio observed in the
### experiment. In other words, according to the estimated probabilities the
### two measurement outcomes have the same amount of "statistical dependence"
### as was observed in the experiment, but the estimates of their marginal
### distributions have been improved by assuming no-signalling and pooling
### the data from measurements with the same setting.
### I next compute the optimal test of local realism and the expected number
### of standard errors by which it exceeds the local realism bound when each
### setting pair is measured the same number of times, which I take to be
### 300 000 000. I compare it with the CHSH test and the CH test
### (Christensen et al. version; Giustina et al. version; Larsson et al.
### version).
### Notation: "d" stands for detection, "n" for non-detection,
### so "dd" means that both Alice and Bob have a detection, and so on.
### Fitting the probabilities is done with the "Iterative Proportional
### Fitting" method, which is asymptotically equivalent to maximum likelihood.
### See Little and Wu (1991), JASA 86, 87-95,
### http://www.jstor.org/stable/2289718
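### A small toy illustration (made-up numbers, not from the experiment) of
### iterative proportional fitting: alternately rescale the rows and columns
### of a 2x2 table to match target margins; the odds ratio is left unchanged
### by these rescalings.
toyStart <- matrix(c(0.004, 0.002, 0.003, 0.991), 2, 2, byrow = TRUE)
rowTarget <- c(0.0055, 0.9945)   # hypothetical target margins for the rows
colTarget <- c(0.0065, 0.9935)   # hypothetical target margins for the columns
toy <- toyStart
for (i in 1:1000) {
  toy <- toy * rowTarget / rowSums(toy)         # rescale rows
  toy <- t(t(toy) * colTarget / colSums(toy))   # rescale columns
}
rowSums(toy) - rowTarget   # approximately zero: row margins matched
colSums(toy) - colTarget   # approximately zero: column margins matched
(toy[1, 1] * toy[2, 2] / (toy[1, 2] * toy[2, 1])) /
  (toyStart[1, 1] * toyStart[2, 2] / (toyStart[1, 2] * toyStart[2, 1]))
### the last ratio is 1 (up to rounding): the odds ratio is preserved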
load("results50.RData")
## C = coincidences, SA = Alice's detections (singles), SB = Bob's detections (singles)
result <- cbind(as.vector(as.matrix(as.data.frame(results11))),
                as.vector(as.matrix(as.data.frame(results12))),
                as.vector(as.matrix(as.data.frame(results21))),
                as.vector(as.matrix(as.data.frame(results22))))
dimnames(result) <- list(count = c("C", "SA", "SB"),
                         setting = c(11, 12, 21, 22))
## N = total number of coincidence windows (trials) per setting combination
result <- rbind(result,
                N = rep(30 * 100 * ceiling((0.1 / (20 * 10^-9) - 600) / 50), 4))
## Z = number of windows in which neither Alice nor Bob had a detection
result <- rbind(result,
                Z = result["N", ] - result["SA", ] - result["SB", ] + result["C", ])
result
## 11 12 21 22
## C 1054015 1140690 1178589 124696
## SA 1521403 1517837 4687681 4682139
## SB 1693449 4477285 1687285 4468842
## N 299964000 299964000 299964000 299964000
## Z 297803163 295109568 294767623 290937715
## Convert the per-setting counts (C, SA, SB, Z) into the four outcome counts
tables <- rbind(
  "dd" = result["C", ],
  "dn" = result["SA", ] - result["C", ],
  "nd" = result["SB", ] - result["C", ],
  "nn" = result["Z", ])
tables
## 11 12 21 22
## dd 1054015 1140690 1178589 124696
## dn 467388 377147 3509092 4557443
## nd 639434 3336595 508696 4344146
## nn 297803163 295109568 294767623 290937715
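### Sanity check (sketch): by construction each column of "tables" adds up to
### the total number of windows N for that setting, since
### dd + dn + nd + nn = C + (SA - C) + (SB - C) + Z and Z = N - SA - SB + C.
colSums(tables) - result["N", ]   # should be all zero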
probs <- tables / outer(rep(1, 4), result["N", ])
probs
## 11 12 21 22
## dd 0.003513805 0.003802756 0.003929101 0.0004157032
## dn 0.001558147 0.001257308 0.011698377 0.0151932999
## nd 0.002131702 0.011123318 0.001695857 0.0144822245
## nn 0.992796346 0.983816618 0.982676665 0.9699087724
AliceProbs1 <- (result["SA", "11"] + result["SA", "12"])/(result["N", "11"] + result["N", "12"])
AliceProbs2 <- (result["SA", "21"] + result["SA", "22"])/(result["N", "21"] + result["N", "22"])
BobProbs1 <- (result["SB", "11"] + result["SB", "21"])/(result["N", "11"] + result["N", "21"])
BobProbs2 <- (result["SB", "12"] + result["SB", "22"])/(result["N", "12"] + result["N", "22"])
AliceProbs1 <- c("d" = AliceProbs1, "n" = 1 - AliceProbs1)
AliceProbs2 <- c("d" = AliceProbs2, "n" = 1 - AliceProbs2)
BobProbs1 <- c("d" = BobProbs1, "n" = 1 - BobProbs1)
BobProbs2 <- c("d" = BobProbs2, "n" = 1 - BobProbs2)
P11mat <- t(matrix(tables[ , "11"], 2, 2)) / result["N", "11"]
dimnames(P11mat) <- list(alice = c("d", "n"), bob = c("d", "n"))
P11mat
## bob
## alice d n
## d 0.003513805 0.001558147
## n 0.002131702 0.992796346
P12mat <- t(matrix(tables[ , "12"], 2, 2)) / result["N", "12"]
dimnames(P12mat) <- list(alice = c("d", "n"), bob = c("d", "n"))
P12mat
## bob
## alice d n
## d 0.003802756 0.001257308
## n 0.011123318 0.983816618
P21mat <- t(matrix(tables[ , "21"], 2, 2)) / result["N", "21"]
dimnames(P21mat) <- list(alice = c("d", "n"), bob = c("d", "n"))
P21mat
## bob
## alice d n
## d 0.003929101 0.01169838
## n 0.001695857 0.98267666
P22mat <- t(matrix(tables[ , "22"], 2, 2)) / result["N", "22"]
dimnames(P22mat) <- list(alice = c("d", "n"), bob = c("d", "n"))
P22mat
## bob
## alice d n
## d 0.0004157032 0.0151933
## n 0.0144822245 0.9699088
### Iterative proportional fitting: alternately rescale the rows of the 2x2
### table to match Alice's pooled margins and the columns to match Bob's
### pooled margins. These rescalings leave the odds ratio unchanged.
fixProbs <- function(probs, alice, bob) {
  for (i in 1:10000) {
    probs <- alice * probs / apply(probs, 1, sum)    # match row (Alice) margins
    pt <- t(probs)
    probs <- t(bob * pt / apply(pt, 1, sum))         # match column (Bob) margins
  }
  probs
}
P11 <- as.vector(t(fixProbs(P11mat, AliceProbs1, BobProbs1)))
P12 <- as.vector(t(fixProbs(P12mat, AliceProbs1, BobProbs2)))
P21 <- as.vector(t(fixProbs(P21mat, AliceProbs2, BobProbs1)))
P22 <- as.vector(t(fixProbs(P22mat, AliceProbs2, BobProbs2)))
Ns <- result["N", ]
FixedProbs <- c(P11, P12, P21, P22)
# dimnames(FixedProbs) <- dimnames(tables)
FixedProbs
## [1] 0.0035076100 0.0015583980 0.0021276229 0.9928063691 0.0038057588
## [6] 0.0012602491 0.0111062423 0.9838277498 0.0039347603 0.0116834806
## [11] 0.0017004726 0.9826812865 0.0004163345 0.0152019064 0.0144956666
## [16] 0.9698860925
## Expected counts under the newly estimated model, i.e. with no-signalling
## assumed and incorporated in the estimation. Compare with the original
## observed counts below.
FixedProbsMat <- matrix(FixedProbs, 4, 4,
                        dimnames = list(outcomes = c("dd", "dn", "nd", "nn"),
                                        settings = c(11, 12, 21, 22)))
FixedCountsMat <- FixedProbsMat * outer(rep(1, 4), Ns)
dimnames(FixedCountsMat) <- dimnames(tables)
FixedCountsMat
## 11 12 21 22
## dd 1052156.7 1141590.6 1180286.4 124885.4
## dn 467463.3 378029.4 3504623.6 4560024.6
## nd 638210.3 3331472.9 510080.6 4348178.1
## nn 297806169.7 295112907.1 294769009.4 290930911.9
tables
## 11 12 21 22
## dd 1054015 1140690 1178589 124696
## dn 467388 377147 3509092 4557443
## nd 639434 3336595 508696 4344146
## nn 297803163 295109568 294767623 290937715
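### Check (sketch): each fitted 2x2 table keeps the log odds ratio of the
### observed table, while its margins now agree across settings as required
### by no-signalling.
logOR <- function(x) log(x["dd", ] * x["nn", ] / (x["dn", ] * x["nd", ]))
logOR(FixedCountsMat) - logOR(tables)   # approximately zero for every setting
(FixedCountsMat["dd", ] + FixedCountsMat["dn", ]) / Ns   # Alice's detection rate: equal for 11 and 12, and for 21 and 22
(FixedCountsMat["dd", ] + FixedCountsMat["nd", ]) / Ns   # Bob's detection rate: equal for 11 and 21, and for 12 and 22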
### From now on we do a theoretical exercise in which the "true probabilities"
### are given by the fixed probabilities we just computed.
### We take the number of trials at each setting combination to be the same
### round number, 300 000 000.
### Because the true probabilities satisfy no-signalling, CHSH and CH give the
### same answer (when normalised properly). However, when we "estimate" them by
### doing the experiment, the estimates have different standard errors, so the
### expected number of standard deviations of departure from local realism can
### differ between tests.
### Moreover, there are uncountably many alternative tests. I will compute the
### best one in the sense of having the smallest standard error.
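### The optimal test is a regression adjustment of CHSH on the no-signalling
### contrasts defined below: the contrasts have expectation zero, so
### subtracting any linear combination of them leaves the expected value
### unchanged, while the variance is minimised by the coefficients
### lambda = (NS' Cov NS)^{-1} NS' Cov s. A minimal helper sketch (my own
### wrapper around the explicit computations carried out below):
optimalTest <- function(s, NS, Cov) {
  lambda <- solve(t(NS) %*% Cov %*% NS, t(NS) %*% Cov %*% s)
  as.vector(s - NS %*% lambda)
}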
VecNames <- as.vector(t(outer(colnames(FixedProbsMat), rownames(FixedProbsMat), paste, sep = "")))
names(FixedProbs) <- VecNames
CHSH <- c(c(1, -1, -1, 1), c(1, -1, -1, 1), c(1, -1, -1, 1), c(-1, 1, 1, -1))
names(CHSH) <- VecNames
## value of CHSH
sum(CHSH * FixedProbs)
## [1] 2.000522
Aplus <- c(1, 1, 0, 0)
Aminus <- - Aplus
Bplus <- c(1, 0, 1, 0)
Bminus <- - Bplus
zero <- c(0, 0, 0, 0)
NSa1 <- c(Aplus, Aminus, zero, zero)
NSa2 <- c(zero, zero, Aplus, Aminus)
NSb1 <- c(Bplus, zero, Bminus, zero)
NSb2 <- c(zero, Bplus, zero, Bminus)
NS <- cbind(NSa1 = NSa1, NSa2 = NSa2, NSb1 = NSb1, NSb2 = NSb2)
rownames(NS) <- VecNames
NS
## NSa1 NSa2 NSb1 NSb2
## 11dd 1 0 1 0
## 11dn 1 0 0 0
## 11nd 0 0 1 0
## 11nn 0 0 0 0
## 12dd -1 0 0 1
## 12dn -1 0 0 0
## 12nd 0 0 0 1
## 12nn 0 0 0 0
## 21dd 0 1 -1 0
## 21dn 0 1 0 0
## 21nd 0 0 -1 0
## 21nn 0 0 0 0
## 22dd 0 -1 0 -1
## 22dn 0 -1 0 0
## 22nd 0 0 0 -1
## 22nn 0 0 0 0
cov11 <- diag(FixedProbsMat[ , "11"]) - outer(FixedProbsMat[ , "11"], FixedProbsMat[ , "11"])
cov12 <- diag(FixedProbsMat[ , "12"]) - outer(FixedProbsMat[ , "12"], FixedProbsMat[ , "12"])
cov21 <- diag(FixedProbsMat[ , "21"]) - outer(FixedProbsMat[ , "21"], FixedProbsMat[ , "21"])
cov22 <- diag(FixedProbsMat[ , "22"]) - outer(FixedProbsMat[ , "22"], FixedProbsMat[ , "22"])
Cov <- matrix(0, 16, 16)
rownames(Cov) <- VecNames
colnames(Cov) <- VecNames
Cov[1:4, 1:4] <- cov11
Cov[5:8, 5:8] <- cov12
Cov[9:12, 9:12] <- cov21
Cov[13:16, 13:16] <- cov22
varS <- t(CHSH) %*% Cov %*% CHSH
covSN <- t(CHSH) %*% Cov %*% NS
covNS <- t(covSN)
covNN <- t(NS) %*% Cov %*% NS
varS
## [,1]
## [1,] 0.2316258
N <- 300000000
seS <- sqrt(varS / N)
S <- sum(CHSH * FixedProbs)
### Number of standard deviations when we use plain CHSH
(S - 2) / seS
## [,1]
## [1,] 18.79388
covSN %*% solve(covNN) %*% covNS
## [,1]
## [1,] 0.1809183
seSopt <- sqrt((varS - covSN %*% solve(covNN) %*% covNS)/N)
S <- sum(CHSH * FixedProbs)
### Number of standard deviations when we use optimal test
(S - 2) / seSopt
## [,1]
## [1,] 40.16742
## coefficients of the optimal test
## (= CHSH minus this linear combination of the four no-signalling constraints)
covSN %*% solve(covNN)
## NSa1 NSa2 NSb1 NSb2
## [1,] -0.5376317 -1.759922 -0.5231683 -1.757428
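### Sanity check (sketch): the adjusted test has the same expected value as
### plain CHSH, because the no-signalling terms have zero expectation under
### the fitted probabilities, but its variance is smaller, namely
### varS - covSN %*% solve(covNN) %*% covNS.
lambda <- solve(covNN, covNS)
CHSHopt <- CHSH - as.vector(NS %*% lambda)
sum(CHSHopt * FixedProbs)                   # should again equal 2.000522
t(CHSHopt) %*% Cov %*% CHSHopt              # should equal 0.2316258 - 0.1809183
sqrt(t(CHSHopt) %*% Cov %*% CHSHopt / N)    # should reproduce seSopt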
### Christensen et al. CH
B <- c(c(1, 0, 0, 0), c(1, 0, 0, 0), c(1, 0, 0, 0), c(-1, 0, 0, 0))
B <- B - (c(Aplus, Aplus, zero, zero) + c(Bplus, zero, Bplus, zero)) / 2
sum(B * FixedProbs)
## [1] 0.0001305537
varB <- t(B) %*% Cov %*% B / N
seB <- sqrt(varB)
seB
## [,1]
## [1,] 3.654546e-06
## Number of standard deviations when we use CH (Christensen et al. version)
sum(B * FixedProbs) / seB
## [,1]
## [1,] 35.72365
### Check that the no-signalling constraints hold exactly for the fitted probabilities
sum(c(Aplus, -Aplus, zero, zero) * FixedProbs)
## [1] 4.336809e-19
sum(c(zero, zero, Aplus, -Aplus) * FixedProbs)
## [1] -8.131516e-19
sum(c(Bplus, zero, -Bplus, zero) * FixedProbs)
## [1] 4.336809e-19
sum(c(zero, Bplus, zero, -Bplus) * FixedProbs)
## [1] 5.421011e-20
### Giustina et al. J (with minus sign)
J <- c(c(1, 0, 0, 0), c(1, 0, 0, 0), c(1, 0, 0, 0), c(-1, 0, 0, 0))
J <- J - (c(zero, Aplus, zero, zero) + c(zero, zero, Bplus, zero))
sum(J * FixedProbs)
## [1] 0.0001305537
varJ <- t(J) %*% Cov %*% J / N
seJ <- sqrt(varJ)
seJ
## [,1]
## [1,] 4.784597e-06
## Number of standard deviations when we use CH (Giustina et al. version)
sum(J * FixedProbs) / seJ
## [,1]
## [1,] 27.28625
### Larsson et al. J (with minus sign)
Jalt <- c(c(1, 0, 0, 0), c(1, 0, 0, 0), c(1, 0, 0, 0), c(-1, 0, 0, 0))
Jalt <- Jalt - (c(Aplus, zero, zero, zero) + c(Bplus, zero, zero, zero))
sum(Jalt * FixedProbs)
## [1] 0.0001305537
varJalt <- t(Jalt) %*% Cov %*% Jalt / N
seJalt <- sqrt(varJalt)
seJalt
## [,1]
## [1,] 7.134096e-06
## Number of standard deviations when we use CH (Larsson et al. version)
sum(Jalt * FixedProbs) / seJalt
## [,1]
## [1,] 18.29997
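### Sanity check (sketch): because the fitted probabilities satisfy
### no-signalling, the three CH-type statistics coincide with each other and
### with the properly normalised CHSH statistic (S - 2)/4; only their
### standard errors differ.
c(CH_Christensen = sum(B * FixedProbs),
  CH_Giustina    = sum(J * FixedProbs),
  CH_Larsson     = sum(Jalt * FixedProbs),
  CHSH_rescaled  = (sum(CHSH * FixedProbs) - 2) / 4)   # all four values agree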