## WARNING: Michel Fodje strongly disagrees with my interpretation
## and use of the algorithms and mathematical model in his code.
## He has so far refused to comment on possible links with the
## "coincidence loophole" models of Pascazio (1988), Larsson and Gill (2004),
## and de Raedt, Hess and Michielsen (2011).
## The present code resulted from study of Michel Fodje's "epr-clocked"
## https://github.com/minkwe/epr-clocked (Python)
## Michel's algorithms are an instance of what has been known for quite
## a long time now as the "coincidence loophole".
## I have studied his code, extracted the mathematical algorithms
## and re-programmed part of them in R.
## This is what I consider the essential core of epr-clocked;
## see my "epr-clocked-full" for a full version with all the outer layers added.
## This version: June 2015. Original version: June 2014.
## Changes: *only* in this header text.
## Reference: J.-A. Larsson and R. D. Gill,
## "Bell's inequality and the coincidence-time loophole",
## Europhys. Lett. 67 (5), pp. 707–713 (2004).
## DOI: 10.1209/epl/i2004-10124-7
set.seed(-1) ## it took me a long time to find a seed
## which gave a violation of CHSH *without post-selection*
N <- 10^4
alpha <- c(0, 90) * pi / 180 # Alice's two possible settings
beta <- c(45, 135) * pi / 180 # Bob's two possible settings
a <- sample(c(1, 2), N, replace = TRUE) # Alice setting names (1, 2)
b <- sample(c(1, 2), N, replace = TRUE) # Bob setting names (1, 2)
coincWindow <- 0.0004
ts <- pi * 0.03 ## timescale
asym <- 0.98 ## asymmetry parameter
spin <- 0.5
n <- 2 * spin ## so actually, n = 1
phase <- pi * n ## so actually, phase = pi
el <- runif(N, 0, 2 * pi) ## minkwe's e (for left)
er <- el + phase ## minkwe's e + phase (for right)
p <- 0.5 * sin(runif(N, 0, pi / 6))^2 ## hidden variable entering the time delays on both sides
ml <- runif(N, asym, 1) ## small random jitter, left
mr <- runif(N, asym, 1) ## small random jitter, right
Cl <- (0.5/pi) * (-1)^n * cos(n * (el - alpha[a])) ## cos(e-a), left
Cr <- (0.5/pi) * (-1)^n * cos(n * (er - beta[b])) ## cos(e-b), right
tdl <- ts * pmax(ml * p - abs(Cl), 0) ## time delays, left
tdr <- ts * pmax(mr * p - abs(Cr), 0) ## time delays, right
A <- sign(Cl) ## measurement outcomes, left
B <- sign(Cr) ## measurement outcomes, right
AB <- A * B ## product of outcomes
mean(AB[a == 1 & b == 1 & abs(tdl-tdr) < coincWindow])
## [1] -0.6441785
mean(AB[a == 1 & b == 2 & abs(tdl-tdr) < coincWindow])
## [1] 0.6443954
mean(AB[a == 2 & b == 1 & abs(tdl-tdr) < coincWindow])
## [1] -0.6601307
mean(AB[a == 2 & b == 2 & abs(tdl-tdr) < coincWindow])
## [1] -0.6773109
S <- - mean(AB[a == 1 & b == 1 & abs(tdl-tdr) < coincWindow]) +
mean(AB[a == 1 & b == 2 & abs(tdl-tdr) < coincWindow]) -
mean(AB[a == 2 & b == 1 & abs(tdl-tdr) < coincWindow]) -
mean(AB[a == 2 & b == 2 & abs(tdl-tdr) < coincWindow])
S ## CHSH
## [1] 2.626015
## Now we compute the detection rate in order to check it against the
## Larsson-Gill modified CHSH bound for coincidence-selection statistics.
gamma11 <- sum((a == 1 & b == 1 & abs(tdl-tdr) < coincWindow)) /
sum((a == 1 & b == 1))
gamma12 <- sum((a == 1 & b == 2 & abs(tdl-tdr) < coincWindow)) /
sum((a == 1 & b == 2))
gamma21 <- sum((a == 2 & b == 1 & abs(tdl-tdr) < coincWindow)) /
sum((a == 2 & b == 1))
gamma22 <- sum((a == 2 & b == 2 & abs(tdl-tdr) < coincWindow)) /
sum((a == 2 & b == 2))
gamma <- min(c(gamma11, gamma12, gamma21, gamma22))
gamma
## [1] 0.7068459
2 + 6 * (1/gamma - 1) ## Larsson-Gill bound (well: the bound is the minimum of this and 4)
## [1] 4.488414
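## For convenience, the same bound with the cap at 4 made explicit.
## This is my own small helper (not part of epr-clocked), based on my reading of
## Larsson and Gill (2004); the name larssonGillBound is my own choice.
larssonGillBound <- function(gamma) min(2 + 6 * (1/gamma - 1), 4)
larssonGillBound(gamma) ## with the gamma above this caps at 4, since 4.488... > 4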
S ## Observed value of CHSH
## [1] 2.626015
## We notice that the detection rate is actually rather bad.
## At this detection rate it would have been possible to arrange that CHSH = 4!
## Instead of selecting pairs by the coincidence window, we could simply have
## selected particles by arrival within a fixed time window. Here are two analyses.
## First: the fixed time window for detection is taken so large that
## no particles are rejected.
mean(AB[a == 1 & b == 1])
## [1] -0.5031348
mean(AB[a == 1 & b == 2])
## [1] 0.4892115
mean(AB[a == 2 & b == 1])
## [1] -0.5144897
mean(AB[a == 2 & b == 2])
## [1] -0.5043685
Sall <- - mean(AB[a == 1 & b == 1]) +
mean(AB[a == 1 & b == 2]) -
mean(AB[a == 2 & b == 1]) -
mean(AB[a == 2 & b == 2])
Sall ## OMG, S > 2 ... by a statistically insignificant amount.
## [1] 2.011205
## Try the experiment again with a different random seed. And again.
## How often does this happen?
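## Here is a rough sketch (my own addition, not part of epr-clocked) of how one
## could answer that question: re-run the unselected "all particles" analysis for
## many seeds and count how often Sall exceeds 2. The time-delay machinery is
## omitted because no trials are rejected here; the name simulateSall is my own.
simulateSall <- function(seed, N = 10^4) {
  set.seed(seed)
  alpha <- c(0, 90) * pi / 180
  beta <- c(45, 135) * pi / 180
  a <- sample(c(1, 2), N, replace = TRUE)
  b <- sample(c(1, 2), N, replace = TRUE)
  n <- 1 ## n = 2 * spin = 1, as above
  el <- runif(N, 0, 2 * pi)
  er <- el + pi * n
  Cl <- (0.5/pi) * (-1)^n * cos(n * (el - alpha[a]))
  Cr <- (0.5/pi) * (-1)^n * cos(n * (er - beta[b]))
  AB <- sign(Cl) * sign(Cr)
  - mean(AB[a == 1 & b == 1]) + mean(AB[a == 1 & b == 2]) -
    mean(AB[a == 2 & b == 1]) - mean(AB[a == 2 & b == 2])
}
## Example use (not run here): mean(sapply(1:100, simulateSall) > 2)
## Since a, b and el are drawn in the same order as in the script above,
## simulateSall(-1) should reproduce the Sall value found there.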
## ======================================
## Now a fixed upper time limit for detection.
## Say, half of the coincidence window we used before.
mean(AB[a == 1 & b == 1 & tdl < 0.0002 & tdr < 0.0002])
## [1] -0.6570477
mean(AB[a == 1 & b == 2 & tdl < 0.0002 & tdr < 0.0002])
## [1] 0.6583991
mean(AB[a == 2 & b == 1 & tdl < 0.0002 & tdr < 0.0002])
## [1] -0.6847892
mean(AB[a == 2 & b == 2 & tdl < 0.0002 & tdr < 0.0002])
## [1] -0.6972477
Stl <- - mean(AB[a == 1 & b == 1 & tdl < 0.0002 & tdr < 0.0002]) +
mean(AB[a == 1 & b == 2 & tdl < 0.0002 & tdr < 0.0002]) -
mean(AB[a == 2 & b == 1 & tdl < 0.0002 & tdr < 0.0002]) -
mean(AB[a == 2 & b == 2 & tdl < 0.0002 & tdr < 0.0002])
Stl ## OMG violation of CHSH ...
## [1] 2.697484
## But wait, there are "non-detected" particles: a detection loophole.
## What is the detection rate, and what is the Larsson modified CHSH bound?
## We need to know the experimental efficiency:
## the minimal probability that Alice has an outcome given that Bob does, and vice versa.
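## For compactness, here is that quantity wrapped in a small helper function.
## This is my own restatement, not part of epr-clocked (the name minEfficiency is
## my own); the eight explicit ratios below compute exactly the same thing.
minEfficiency <- function(A, B, a, b) {
  effs <- c()
  for (i in 1:2) for (j in 1:2) {
    sel <- (a == i & b == j)
    both <- sum(A[sel] != 0 & B[sel] != 0)
    effs <- c(effs,
              both / sum(B[sel] != 0), ## P(Alice has an outcome | Bob does)
              both / sum(A[sel] != 0)) ## P(Bob has an outcome | Alice does)
  }
  min(effs)
}
## After the two ifelse lines below, minEfficiency(A, B, a, b) equals the gamma
## computed from the eight explicit ratios.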
A <- ifelse(tdl < 0.002, A, 0)
B <- ifelse(tdr < 0.002, B, 0)
effAB11 <- sum((A != 0 & B != 0)[a == 1 & b == 1]) / sum((B != 0)[a == 1 & b == 1])
effAB12 <- sum((A != 0 & B != 0)[a == 1 & b == 2]) / sum((B != 0)[a == 1 & b == 2])
effAB21 <- sum((A != 0 & B != 0)[a == 2 & b == 1]) / sum((B != 0)[a == 2 & b == 1])
effAB22 <- sum((A != 0 & B != 0)[a == 2 & b == 2]) / sum((B != 0)[a == 2 & b == 2])
effBA11 <- sum((A != 0 & B != 0)[a == 1 & b == 1]) / sum((A != 0)[a == 1 & b == 1])
effBA12 <- sum((A != 0 & B != 0)[a == 1 & b == 2]) / sum((A != 0)[a == 1 & b == 2])
effBA21 <- sum((A != 0 & B != 0)[a == 2 & b == 1]) / sum((A != 0)[a == 2 & b == 1])
effBA22 <- sum((A != 0 & B != 0)[a == 2 & b == 2]) / sum((A != 0)[a == 2 & b == 2])
gamma <- min(c(effAB11, effAB12, effAB21, effAB22,
effBA11, effBA12, effBA21, effBA22))
gamma
## [1] 0.8806768
delta <- 4 * ((1/gamma) - 1)
2 + delta ## Larsson modified CHSH bound at detection efficiency gamma
## [1] 2.541962
Stl ## OMG violation of Larsson modified CHSH bound!!!
## [1] 2.697484
## Is it "just statistical error"
## new random settings, everything else the same
a <- sample(c(1, 2), N, replace = TRUE) # Alice setting names (1, 2)
b <- sample(c(1, 2), N, replace = TRUE) # Bob setting names (1, 2)
Cl <- (0.5/pi) * (-1)^n * cos(n * (el - alpha[a]))
Cr <- (0.5/pi) * (-1)^n * cos(n * (er - beta[b]))
tdl <- ts * pmax(ml * p - abs(Cl), 0)
tdr <- ts * pmax(mr * p - abs(Cr), 0)
A <- sign(Cl)
B <- sign(Cr)
AB <- A * B
A <- ifelse(tdl < 0.002, A, 0)
B <- ifelse(tdr < 0.002, B, 0)
mean((A*B)[a == 1 & b == 1 & A*B != 0])
## [1] -0.5832492
mean((A*B)[a == 1 & b == 2 & A*B != 0])
## [1] 0.6038795
mean((A*B)[a == 2 & b == 1 & A*B != 0])
## [1] -0.6301162
mean((A*B)[a == 2 & b == 2 & A*B != 0])
## [1] -0.6164524
S <- - mean((A*B)[a == 1 & b == 1 & A*B != 0]) +
mean((A*B)[a == 1 & b == 2 & A*B != 0]) -
mean((A*B)[a == 2 & b == 1 & A*B != 0]) -
mean((A*B)[a == 2 & b == 2 & A*B != 0])
S ## CHSH
## [1] 2.433697
effAB11 <- sum((A != 0 & B != 0)[a == 1 & b == 1]) / sum((B != 0)[a == 1 & b == 1])
effAB12 <- sum((A != 0 & B != 0)[a == 1 & b == 2]) / sum((B != 0)[a == 1 & b == 2])
effAB21 <- sum((A != 0 & B != 0)[a == 2 & b == 1]) / sum((B != 0)[a == 2 & b == 1])
effAB22 <- sum((A != 0 & B != 0)[a == 2 & b == 2]) / sum((B != 0)[a == 2 & b == 2])
effBA11 <- sum((A != 0 & B != 0)[a == 1 & b == 1]) / sum((A != 0)[a == 1 & b == 1])
effBA12 <- sum((A != 0 & B != 0)[a == 1 & b == 2]) / sum((A != 0)[a == 1 & b == 2])
effBA21 <- sum((A != 0 & B != 0)[a == 2 & b == 1]) / sum((A != 0)[a == 2 & b == 1])
effBA22 <- sum((A != 0 & B != 0)[a == 2 & b == 2]) / sum((A != 0)[a == 2 & b == 2])
gamma <- min(c(effAB11, effAB12, effAB21, effAB22,
effBA11, effBA12, effBA21, effBA22))
gamma
## [1] 0.8783844
delta <- 4 * ((1/gamma) - 1)
2 + delta ## Phew ...
## [1] 2.553815
## It seems that the statistical error in the Larsson bound is rather large.
## But of course there are other bounds, and I think they will be better from
## several statistical points of view: namely, all CHSH bounds obtained from
## all groupings (per party, per setting) of the three outcomes into two,
## together with all CGLMP bounds.
## Note: "all CHSH and all CGLMP bounds" means what you get after all permutations
## of parties and of settings per party.
## There are 8 CHSH bounds; I don't know how many CGLMP bounds there are.
## In a given LHV model, usually only one of this multitude of bounds will be
## "interesting", i.e. close to being violated. A sketch of the grouping idea follows.