setwd("C:/Data Science -Python/tennisCourts")
list.files("C:/Data Science -Python/tennisCourts")
##  [1] "File1.jpeg"                        
##  [2] "File10.jpeg"                       
##  [3] "File11.jpeg"                       
##  [4] "File12.jpeg"                       
##  [5] "File13.jpeg"                       
##  [6] "File14.jpeg"                       
##  [7] "File15.jpeg"                       
##  [8] "File16.jpeg"                       
##  [9] "File17.jpeg"                       
## [10] "File18.jpeg"                       
## [11] "File19.jpeg"                       
## [12] "File2.jpeg"                        
## [13] "File20.jpeg"                       
## [14] "File21.jpeg"                       
## [15] "File22.jpeg"                       
## [16] "File23.jpeg"                       
## [17] "File24.jpeg"                       
## [18] "File25.jpeg"                       
## [19] "File26.jpeg"                       
## [20] "File27.jpeg"                       
## [21] "File28.jpeg"                       
## [22] "File29.jpeg"                       
## [23] "File3.jpeg"                        
## [24] "File30.jpeg"                       
## [25] "File31.jpeg"                       
## [26] "File32.jpeg"                       
## [27] "File33.jpeg"                       
## [28] "File34.jpeg"                       
## [29] "File35.jpeg"                       
## [30] "File36.jpeg"                       
## [31] "File37.jpeg"                       
## [32] "File38.jpeg"                       
## [33] "File39.jpeg"                       
## [34] "File4.jpeg"                        
## [35] "File40.jpeg"                       
## [36] "File41.jpeg"                       
## [37] "File42.jpeg"                       
## [38] "File43.jpeg"                       
## [39] "File44.jpeg"                       
## [40] "File45.jpeg"                       
## [41] "File46.jpeg"                       
## [42] "File47.jpeg"                       
## [43] "File48.jpeg"                       
## [44] "File49.jpeg"                       
## [45] "File5.jpeg"                        
## [46] "File50.jpeg"                       
## [47] "File51.jpeg"                       
## [48] "File52.jpeg"                       
## [49] "File53.jpeg"                       
## [50] "File54.jpeg"                       
## [51] "File55.jpeg"                       
## [52] "File56.jpeg"                       
## [53] "File57.jpeg"                       
## [54] "File58.jpeg"                       
## [55] "File59.jpeg"                       
## [56] "File6.jpeg"                        
## [57] "File60.jpeg"                       
## [58] "File61.jpeg"                       
## [59] "File62.jpeg"                       
## [60] "File63.jpeg"                       
## [61] "File64.jpeg"                       
## [62] "File65.jpeg"                       
## [63] "File66.jpeg"                       
## [64] "File67.jpeg"                       
## [65] "File68.jpeg"                       
## [66] "File69.jpeg"                       
## [67] "File7.jpeg"                        
## [68] "File70.jpeg"                       
## [69] "File71.jpeg"                       
## [70] "File72.jpeg"                       
## [71] "File73.jpeg"                       
## [72] "File74.jpeg"                       
## [73] "File75.jpeg"                       
## [74] "File76.jpeg"                       
## [75] "File77.jpeg"                       
## [76] "File78.jpeg"                       
## [77] "File79.jpeg"                       
## [78] "File8.jpeg"                        
## [79] "File80.jpeg"                       
## [80] "File9.jpeg"                        
## [81] "Outputs"                           
## [82] "rsconnect"                         
## [83] "source image.jpeg"                 
## [84] "Tennis courts image alignment.Rmd" 
## [85] "Tennis_courts_image_alignment.html"
## [86] "test_r_markdown.html"              
## [87] "test_r_markdown.Rmd"

Tennis courts alignment test

To be honest, I don’t think this is the best algorithm out there, but it happens to work on this dataset. My initial idea was simple: align tennis courts of different colors, lighting conditions, distorted shapes, and image resolutions to either the vertical or the horizontal direction, in a handful of steps.

These steps include:

First, define the coordinate system, i.e. the x- and y-axes, for each tennis court image;

Second, filter out the red color channel, because most of the tennis courts are blue or green with white outer lines;

Third, use a corner/keypoint detector to find the key points. I read a lot of other people’s work and borrowed ideas from facial recognition algorithms, e.g. Moravec and Harris. Luckily, the Harris corner detector works in this case;

Fourth, use the corners’ coordinates (x, y) to calculate the angle of the key points. Since the color channels have been filtered and the target tennis court highlighted, the key point angles help decide the central angle of the court from either the lightest or the darkest pixel values;

Fifth, rotate the original image by the calculated angle to either 90 degrees or 0 degrees; 90 degrees was used here to align the tennis courts in the horizontal direction.

Read JPEG data

library(jpeg)    # readJPEG
library(imager)  # as.cimg, imresize, isoblur, imgradient, imrotate

#id = c(1:80)
id = 45
tnim = paste0("File", id, ".jpeg")
scl = 1   # global scale factor
tim = readJPEG(tnim)
tim = imresize(as.cimg(tim), scale = 1)
## Warning in as.cimg.array(tim): Assuming third dimension corresponds to
## colour
str(tim)
##  cimg [1:852, 1:1575, 1, 1:3] 0.129 0.129 0.129 0.125 0.125 ...
# The court color lives mostly in the G and B channels, so channel 1 (R) is
# extracted on its own; there the court surface is dark and the white lines stand out
cimr = imresize(as.cimg(tim[, , 1]), scale = 1)

plot(tim)

plot(cimr)

Data processing

Define the Harris corner detector function

head(kp)
##    mx my
## 1 770 31
## 2 532 40
## 3 491 78
## 4 403 58
## 5 491 61
## 6 753 69
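
The chunk that actually computes kp is not echoed in the rendered output above. A minimal Harris-style detector built from imager’s gradient and blur functions might look like the sketch below; the helper name harris_keypoints, the Harris constant k, and the quantile threshold are assumptions for illustration, not the original code.

harris_keypoints <- function(im, sigma = 2, k = 0.04, q = 0.99){
  ix  <- imgradient(im, "x")
  iy  <- imgradient(im, "y")
  # Structure tensor entries, smoothed with a Gaussian window
  ixx <- isoblur(ix*ix, sigma)
  iyy <- isoblur(iy*iy, sigma)
  ixy <- isoblur(ix*iy, sigma)
  # Harris corner response: det(M) - k * trace(M)^2
  resp <- ixx*iyy - ixy^2 - k*(ixx + iyy)^2
  # Keep the strongest responses and return their (x, y) pixel coordinates
  d <- as.data.frame(resp)
  d <- d[d$value > quantile(d$value, q), ]
  data.frame(mx = d$x, my = d$y)
}
# e.g. kp <- harris_keypoints(cimr)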
stencil <- expand.grid(dx = seq(-20, 20, 5)*scl, dy = seq(-20, 20, 5)*scl)
sigma_b = 5*scl
ima_bl = isoblur(cimr, sigma_b, gaussian = TRUE)  # Gaussian blur before taking gradients

pi = 3.141592653  # (R's built-in pi constant would also work here)

ix = imgradient(ima_bl, "x")
iy = imgradient(ima_bl, "y")

# Gradient orientation in degrees, pixel by pixel
ita = atan(iy/ix)*180/pi
hist(ita, breaks = 180)
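
As a quick sanity check, the dominant gradient direction can also be read straight off the histogram object, before going through the get_orientations() function defined below; the names h and dominant here are only illustrative.

h <- hist(ita, breaks = 180, plot = FALSE)
dominant <- h$mids[which.max(h$counts)]  # angle bin that collects the most gradient votes
dominant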

Rotate the images

The angle used to rotate each image is calculated from the Harris keypoint detector.

get_orientations <- function(tim){
  # Gradient orientation (in degrees) at every pixel
  ix = imgradient(tim, "x")
  iy = imgradient(tim, "y")
  ita = atan(iy/ix)*180/pi
  # Sample 200 pixels and bin their orientations to the nearest 0.5 degree
  iga = table(sample(round(ita*2)/2, 200))
  # Most common orientation
  ma1 = max(iga)
  m1 = which(iga == ma1)
  theta_1 = as.numeric(names(m1))
  # Suppress bins around the first peak, then take the second-most common orientation.
  # Note: which() can return several equally frequent bins, which is what triggers
  # the "condition has length > 1" warnings below.
  iga[max((m1-20), 0):min((m1+20), length(iga))] = 0
  ma2 = max(iga)
  m2 = which(iga == ma2)
  theta_2 = as.numeric(names(m2))
  # Fold both angles into the (-45, 45] range
  if(theta_1 > 45) theta_1 = theta_1 - 90
  if(theta_1 < (-45)) theta_1 = theta_1 + 90
  if(theta_2 > 45) theta_2 = theta_2 - 90
  if(theta_2 < (-45)) theta_2 = theta_2 + 90
  # Return both directions if they disagree by more than 5 degrees
  if(abs(theta_1 - theta_2) > 5){
    return(c(theta_1, theta_2))
  }
  else{
    return(theta_1)
  }
}

th=get_orientations(ima_bl)
## Warning in if (theta_2 > 45) theta_2 = theta_2 - 90: the condition has
## length > 1 and only the first element will be used
## Warning in if (theta_2 < (-45)) theta_2 = theta_2 + 90: the condition has
## length > 1 and only the first element will be used
## Warning in if (abs(theta_1 - theta_2) > 5) {: the condition has length > 1
## and only the first element will be used
print(paste0("Global main directions ", th, "°"))
## [1] "Global main directions 33°"    "Global main directions -27.5°"
## [3] "Global main directions 32°"
for(theta in th) 
  plot(imrotate(tim,-theta))
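
With a single image working, the same steps can be looped over all 80 files, which is what the commented-out id = c(1:80) line above hints at. The sketch below is just one way to batch it: the output file names, the use of R() to pull the red channel, imager::save.image() for writing, and taking only the first detected direction are assumptions rather than the original workflow.

for (id in 1:80) {
  tnim   <- paste0("File", id, ".jpeg")
  tim    <- as.cimg(readJPEG(tnim))
  cimr   <- R(tim)                          # red channel, as in the single-image run
  ima_bl <- isoblur(cimr, 5*scl, gaussian = TRUE)
  th     <- get_orientations(ima_bl)
  rot    <- imrotate(tim, -th[1])           # rotate by the first main direction
  imager::save.image(rot, file.path("Outputs", paste0("File", id, "_aligned.jpeg")))
}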