Problem set 1

library(pracma)
## Warning: package 'pracma' was built under R version 3.5.2
knitr::include_graphics("C:\\Users\\sergioor\\Desktop\\Old PC\\CUNY\\DATA605\\hw4set1.png")

1. Write code in R to compute

\(X=AA^{T} \quad \text{and} \quad Y=A^{T}A\)

#create a matrix
A = matrix(c(1,-1, 2,0, 3,4), nrow=2, byrow=FALSE)

#calculate transpose of matrix A
A.transpose = t(A)
A.transpose
##      [,1] [,2]
## [1,]    1   -1
## [2,]    2    0
## [3,]    3    4
#calculate X = matrix product of A and the transpose of A
#results in a 2x2 matrix
X = A%*%A.transpose
X
##      [,1] [,2]
## [1,]   14   11
## [2,]   11   17
#calculate Y = matrix product of the transpose of A and A
#results in a 3x3 matrix
Y = A.transpose%*%A
Y
##      [,1] [,2] [,3]
## [1,]    2    2   -1
## [2,]    2    4    6
## [3,]   -1    6   25

Matrix \(A=\begin{bmatrix} 1 & 2 & 3 \\ -1 & 0 & 4 \end{bmatrix}\)

Transpose of matrix A: \(A^{T}=\begin{bmatrix} 1 & -1 \\ 2 & 0 \\ 3 & 4 \end{bmatrix}\)

Product of A and its transpose: \(X=AA^{T}\)

\(X=\begin{bmatrix} 14 & 11 \\ 11 & 17 \end{bmatrix}\)

Product of the transpose of A and A: \(Y=A^{T}A\)

\(Y=\begin{bmatrix} 2 & 2 & -1 \\ 2 & 4 & 6 \\ -1 & 6 & 25 \end{bmatrix}\)
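
Both \(X=AA^{T}\) and \(Y=A^{T}A\) are symmetric by construction, since \((AA^{T})^{T}=AA^{T}\); this guarantees real eigenvalues in the next step. A quick check:

#both products are symmetric, so their eigenvalues are real
isSymmetric(X)
isSymmetric(Y)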

2. Compute the eigenvalues and eigenvectors of X and Y using the built-in commands in R

The eigenvalues and eigenvectors are calculated with the R function eigen. The function returns two components, values and vectors; the eigenvectors are normalized to unit length.

#Compute eigenvalues of X
X.eigenvalues = eigen(X)$values
X.eigenvalues
## [1] 26.601802  4.398198
#Compute eigenvectors of X

X.eigenvectors = eigen(X)$vectors
X.eigenvectors
##           [,1]       [,2]
## [1,] 0.6576043 -0.7533635
## [2,] 0.7533635  0.6576043

Eigenvalues of X are \(\lambda_{1}=26.6018017\) and \(\lambda_{2}=4.3981983\).

Eigenvectors of X are:

For \(\lambda_{1}=26.6018017\): \({\varepsilon}_{X}(26.6018017)=\begin{bmatrix} 0.6576043 \\ 0.7533635 \end{bmatrix}\)

For \(\lambda_{2}=4.3981983\): \({\varepsilon}_{X}(4.3981983)=\begin{bmatrix} -0.7533635 \\ 0.6576043 \end{bmatrix}\)

#Compute eigenvalues of Y
Y.eigenvalues = eigen(Y)$values
Y.eigenvalues
## [1] 2.660180e+01 4.398198e+00 1.058982e-16
#Compute eigenvectors of Y

Y.eigenvectors = eigen(Y)$vectors
Y.eigenvectors
##             [,1]       [,2]       [,3]
## [1,] -0.01856629 -0.6727903  0.7396003
## [2,]  0.25499937 -0.7184510 -0.6471502
## [3,]  0.96676296  0.1765824  0.1849001

Eigenvalues of Y are \(\lambda_{1}=26.6018017\), \(\lambda_{2}=4.3981983\) and \(\lambda_{3}=1.058982\times 10^{-16}\).

Eigenvalue \({\lambda}_{3}\) is extremely small; rounded to 4 digits it equals zero. It is non-zero only because of floating-point rounding error, so it can be treated as zero in practice.
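
Base R's zapsmall makes this explicit by rounding away values that are numerically indistinguishable from zero:

#zap the near-zero eigenvalue to exactly zero for readability
zapsmall(Y.eigenvalues)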

Eigenvectors of Y are:

For \(\lambda_{1}=26.6018017\): \({\varepsilon}_{Y}(26.6018017)=\begin{bmatrix} -0.0185663 \\ 0.2549994 \\ 0.9667630 \end{bmatrix}\)

For \(\lambda_{2}=4.3981983\): \({\varepsilon}_{Y}(4.3981983)=\begin{bmatrix} -0.6727903 \\ -0.7184510 \\ 0.1765824 \end{bmatrix}\)

For \(\lambda_{3}\approx 0\): \({\varepsilon}_{Y}(\lambda_{3})=\begin{bmatrix} 0.7396003 \\ -0.6471502 \\ 0.1849001 \end{bmatrix}\)
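
The defining property \(Yv=\lambda v\) can be spot-checked for the first eigenpair:

#Y %*% v1 - lambda1 * v1 should be the zero vector
round(Y %*% Y.eigenvectors[,1] - Y.eigenvalues[1] * Y.eigenvectors[,1], 10)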

3. Compute the left-singular vectors, singular values, and right-singular vectors of A using the svd command

The SVD of the matrix is the factorization \(A=U\Sigma V^{T}\),

where \(U\) and \(V\) are orthogonal matrices and \(\Sigma\) is a diagonal matrix containing the singular values.

\(U=\begin{bmatrix} \frac{1}{\sigma_{1}}Av_{1} & \frac{1}{\sigma_{2}}Av_{2} & \cdots & \frac{1}{\sigma_{r}}Av_{r} & \frac{n}{|n|} \end{bmatrix}\)

where \(\sigma_{i}=\sqrt{\lambda_{i}}\) and \(\{\lambda_{1},\lambda_{2},\ldots,\lambda_{r}\}\) are the non-zero eigenvalues of \(A^{T}A\);

\(v_{i}\) are the corresponding eigenvectors \(\{v_{1},v_{2},\ldots,v_{r}\}\) of \(A^{T}A\);

\(n\) is a basis vector of \(N(A^{T})\), the nullspace of \(A^{T}\), and \(|n|\) is its length, so \(n/|n|\) is a unit vector (such columns are needed only when the rank is less than the number of rows);

\(V^{T}\) is the transpose of the matrix \(V\) whose columns are the eigenvectors of \(A^{T}A\);

\(\Sigma\) is the diagonal matrix of the square roots of the eigenvalues of \(A^{T}A\).
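
As a sketch of this construction for the matrix A above (the names ev, sig, V2 and U.manual are illustrative, not part of the assignment):

#eigen-decomposition of t(A) %*% A
ev  <- eigen(t(A) %*% A)
#non-zero singular values: sigma_i = sqrt(lambda_i)
sig <- sqrt(ev$values[1:2])
#corresponding eigenvectors v1, v2
V2  <- ev$vectors[, 1:2]
#left-singular vectors: u_i = (1/sigma_i) * A %*% v_i
U.manual <- cbind(A %*% V2[,1] / sig[1], A %*% V2[,2] / sig[2])
#U.manual matches svd(A)$u up to the sign of each column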

The entire decomposition can be computed with the R function svd.

#compute the full svd of matrix A
#nu = nrow(A) requests all left-singular vectors
#nv = ncol(A) requests all right-singular vectors

A.svd = svd(A, nu = nrow(A), nv = ncol(A))
#get u vectors
A.svd.u = A.svd$u
#get v vectors
A.svd.v = A.svd$v
#get diagonal matrix of singular values
A.svd.d = diag(A.svd$d)
#diagonal matrix of squared singular values (used in part 4)
A.svd.d.sq = diag(A.svd$d)%*%diag(A.svd$d)
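
A quick check that U and V are orthogonal, i.e. their transposes are their inverses:

#t(U) %*% U and t(V) %*% V should both be identity matrices
round(t(A.svd.u) %*% A.svd.u, 10)
round(t(A.svd.v) %*% A.svd.v, 10)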

The left-singular vectors of the SVD are

\(U=\begin{bmatrix} -0.6576043 & -0.7533635 \\ -0.7533635 & 0.6576043 \end{bmatrix}\)

The columns of U match the eigenvectors of X up to sign: negating the first column of U reproduces the eigenvectors of X exactly.

\(\begin{bmatrix} 0.6576043 & -0.7533635 \\ 0.7533635 & 0.6576043 \end{bmatrix}\)

The right-singular vectors are

\(V=\begin{bmatrix} 0.0185663 & -0.6727903 & -0.7396003 \\ -0.2549994 & -0.7184510 & 0.6471502 \\ -0.9667630 & 0.1765824 & -0.1849001 \end{bmatrix}\)

The columns of V likewise match the eigenvectors of Y up to sign: negating the first and third columns of V reproduces the eigenvectors of Y exactly.

\(\begin{bmatrix} -0.0185663 & -0.6727903 & 0.7396003 \\ 0.2549994 & -0.7184510 & -0.6471502 \\ 0.9667630 & 0.1765824 & 0.1849001 \end{bmatrix}\)

The singular values are

\(\Sigma=\begin{bmatrix} 5.1576934 & 0 \\ 0 & 2.0971882 \end{bmatrix}\)

4. Two non-zero eigenvalues (the 3rd value will be very close to zero, if not zero) of both X and Y are the same and are squares of the non-zero singular values of A

The squares of the singular values of A are the same as the non-zero eigenvalues of X and Y:

Eigenvalues of X are 26.6018017 and 4.3981983.

Eigenvalues of Y are 26.6018017 and 4.3981983; the third is left out because it is effectively zero.

Squares of the singular values of A are \(5.1576934^{2}=26.6018017\) and \(2.0971882^{2}=4.3981983\).
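
A quick numerical check with the objects computed above:

#squared singular values (diagonal of A.svd.d.sq) vs. eigenvalues of X
diag(A.svd.d.sq)
X.eigenvalues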

Using the U, V and \(\Sigma\) above we cannot reconstruct the original matrix A directly, because V is a 3x3 matrix while U and \(\Sigma\) are 2x2, so the product \(U\Sigma V^{T}\) is not conformable. The third column of V pairs with the near-zero eigenvalue, so it can be dropped.
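
For example, dropping the third column of the full V restores conformability (a sketch using the objects above):

#drop the third column of V (the near-zero eigenvalue) before reconstructing
round(A.svd.u %*% A.svd.d %*% t(A.svd.v[, 1:2]), 0)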

#if nu and nv are not passed, svd returns only the vectors that pair with
#the non-zero singular values, so the product below is conformable
A.svd = svd(A)
#get u vectors
A.svd.u = A.svd$u
#get v vectors
A.svd.v = A.svd$v
#get diagonal matrix
A.svd.d = diag(A.svd$d)

#multiply u, d and the transpose of v to recover the original matrix
round(A.svd.u%*%A.svd.d %*%t(A.svd.v),0)
##      [,1] [,2] [,3]
## [1,]    1    2    3
## [2,]   -1    0    4
A==round(A.svd.u%*%A.svd.d %*%t(A.svd.v),0)
##      [,1] [,2] [,3]
## [1,] TRUE TRUE TRUE
## [2,] TRUE TRUE TRUE

Problem set 2

knitr::include_graphics("C:\\Users\\sergioor\\Desktop\\Old PC\\CUNY\\DATA605\\hw4set2.png")

The following is a custom function to calculate the inverse of a matrix using cofactors. The function prints a message and returns -1 if the given matrix is not square or if its determinant is zero.

myinverse <- function(A){
  mRows <- nrow(A)
  mCols <- ncol(A)

  if (mRows != mCols) {
    #a non-square matrix does not have an inverse
    print("Not a square matrix. Inverse cannot be calculated ...")
    return(-1)
  }
  
  if (abs(det(A)) < 1e-10) {
    #if the determinant is (numerically) zero the inverse cannot be
    #calculated, because the cofactor matrix is divided by the determinant
    print("This is a singular matrix as determinant is 0. Inverse cannot be calculated ...")
    return(-1)
  }
  
  #once the matrix passes the checks above, initialize the cofactor matrix
  retM <- matrix(0, nrow = mRows, ncol = mCols)
  
  #loop over columns and rows
  for(i in 1:mCols){
    for (j in 1:mRows){
      #calculate the minor; drop = FALSE keeps the submatrix a matrix
      #so that det() also works for 2x2 inputs
      retM[j,i] = det(A[-j, -i, drop = FALSE])
      #the cofactor sign alternates with row+column parity
      if ((j+i) %% 2 != 0){
        retM[j,i] = -1 * retM[j,i]
      }
    }
  }
  
  #transpose the cofactor matrix (giving the adjugate) and divide by the
  #determinant; the result is the inverse matrix
  retM = t(retM)/det(A)

  return(retM)
}

Function test cases

#given matrix A is not a square matrix
A = matrix(c(1,-1, 2,0, 3,4), nrow=2, byrow=FALSE)
#calculate inverse using custom function
B = myinverse(A) 
## [1] "Not a square matrix. Inverse cannot be calculated ..."
#given matrix A is a square matrix with determinant zero
A = matrix(c(1,3,8, 2,0,4, 1,4,10), nrow=3, byrow=FALSE)
det(A)
## [1] 0
#calculate inverse using custom function
B = myinverse(A) 
## [1] "This is a singular matrix as determinant is 0. Inverse cannot be calculated ..."
#given matrix A is a square matrix
A = matrix(c(1,3,2,1, 0,0,1,0, 2,0,4,5, -1,5,-3,0), nrow=4, byrow=FALSE)
#calculate inverse using custom function
B = myinverse(A)
#inverse matrix of A
B
##            [,1]        [,2] [,3]       [,4]
## [1,]  0.8333333  0.16666667    0 -0.3333333
## [2,] -2.5000000  0.10000000    1  0.2000000
## [3,] -0.1666667 -0.03333333    0  0.2666667
## [4,] -0.5000000  0.10000000    0  0.2000000
#the product of A and B should equal the identity matrix
round(A%*%B,0)
##      [,1] [,2] [,3] [,4]
## [1,]    1    0    0    0
## [2,]    0    1    0    0
## [3,]    0    0    1    0
## [4,]    0    0    0    1
#given matrix A is a square matrix
A = matrix(c(1,2,4, 2,3,9, 1,3,6), nrow=3, byrow=FALSE)
#calculate inverse using custom function
B = myinverse(A)
#inverse matrix of A
B
##      [,1]       [,2]       [,3]
## [1,]    3  1.0000000 -1.0000000
## [2,]    0 -0.6666667  0.3333333
## [3,]   -2  0.3333333  0.3333333
#the product of A and B should equal the identity matrix
round(A%*%B,0)
##      [,1] [,2] [,3]
## [1,]    1    0    0
## [2,]    0    1    0
## [3,]    0    0    1
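
As an additional check beyond the test cases above, the custom inverse can be compared against base R's built-in solve():

#cross-check the custom inverse against solve(); should return TRUE
all.equal(B, solve(A))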