05/03
\[ \textbf{Finding the MLE for } \theta \textbf{ in the Rayleigh Distribution} \]Given PDF:
\[ f(x|\theta) = \frac{x}{\theta^2} e^{-\frac{x^2}{2\theta^2}}, \quad x \geq 0, \theta > 0 \]
Let \(X_1, X_2, \dots, X_n\) be i.i.d. from this distribution.
Step 1: Likelihood Function
\[ L(\theta) = \prod_{i=1}^{n} f(x_i|\theta) = \prod_{i=1}^{n} \left( \frac{x_i}{\theta^2} \cdot e^{-x_i^2/(2\theta^2)} \right) = \frac{\prod_{i=1}^{n} x_i}{\theta^{2n}} \cdot \exp\left( -\frac{1}{2\theta^2} \sum_{i=1}^{n} x_i^2 \right) \]
Step 2: Log-Likelihood
Taking logarithms:
\[ \ell(\theta) = \ln L(\theta) = \ln \left( \frac{\prod x_i}{\theta^{2n}} \right) + \ln \left( e^{-\frac{1}{2\theta^2} \sum x_i^2} \right) = \sum_{i=1}^{n} \ln x_i - 2n \ln \theta - \frac{1}{2\theta^2} \sum_{i=1}^{n} x_i^2 \]
Step 3: Score Function (Derivative of the Log-Likelihood)
\[ \frac{d\ell}{d\theta} = -\frac{2n}{\theta} + \frac{1}{\theta^3} \sum_{i=1}^{n} x_i^2 \]
Step 4: Set the Score to Zero and Solve
\[ -\frac{2n}{\theta} + \frac{1}{\theta^3} \sum_{i=1}^{n} x_i^2 = 0 \\ \Rightarrow \frac{1}{\theta^3} \sum x_i^2 = \frac{2n}{\theta} \\ \Rightarrow \frac{\sum x_i^2}{\theta^2} = 2n \\ \Rightarrow \theta^2 = \frac{1}{2n} \sum_{i=1}^{n} x_i^2 \]
Step 5: Final Answer (MLE of \(\theta\))
\[ \boxed{ \hat{\theta} = \sqrt{ \frac{1}{2n} \sum_{i=1}^{n} x_i^2 } } \]
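As a quick check that this stationary point is indeed a maximum (a step skipped above), the second derivative of the log-likelihood is negative at \(\hat{\theta}\), using \(\sum x_i^2 = 2n\hat{\theta}^2\):
\[ \ell''(\theta) = \frac{2n}{\theta^2} - \frac{3}{\theta^4} \sum_{i=1}^{n} x_i^2, \qquad \ell''(\hat{\theta}) = \frac{2n}{\hat{\theta}^2} - \frac{3 \cdot 2n\hat{\theta}^2}{\hat{\theta}^4} = -\frac{4n}{\hat{\theta}^2} < 0 \]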
Suppose:
\[
f(x \mid \theta) = (1 + \theta)x^{\theta}, \quad x \in [0,1], \ \theta > -1
\]
Let \(X_1, X_2, \dots, X_n\) be i.i.d. from this distribution. We first compute the Fisher information \(I(\theta)\).
Step 1: Log-Likelihood for a Single Observation
\[
\ln f(x \mid \theta) = \ln\left((1 + \theta)x^{\theta}\right) = \ln(1 + \theta) + \ln(x^{\theta}) = \ln(1 + \theta) + \theta \ln(x) = \ell(\theta)
\]
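For the full sample of \(n\) i.i.d. observations, the log-likelihood is simply the sum of these per-observation terms (written out here for later use):
\[ \ell_n(\theta) = \sum_{i=1}^{n} \left[ \ln(1 + \theta) + \theta \ln(x_i) \right] = n \ln(1 + \theta) + \theta \sum_{i=1}^{n} \ln(x_i) \]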
Step 2: Score Function (first derivative with respect to \(\theta\)):
\[ \frac{\partial}{\partial\theta}\ell(\theta) = \frac{1}{1 + \theta} + \ln(x) = S(\theta) \]
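The MLE itself is not derived in these notes, but since it is used in the confidence-interval part below, here is the short derivation: set the full-sample score to zero.
\[ \frac{n}{1 + \hat{\theta}} + \sum_{i=1}^{n} \ln(x_i) = 0 \quad\Rightarrow\quad \hat{\theta} = -1 - \frac{n}{\sum_{i=1}^{n} \ln(x_i)} \]
Since each \(\ln(x_i) < 0\) for \(x_i \in (0,1)\), this automatically satisfies \(\hat{\theta} > -1\).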
Step 3: Fisher Information, \(I(\theta) = \mathbb{E}\left[S(\theta)^2\right]\)
\[ \mathbb{E}\left[S(\theta)^2\right] = \int_{0}^{1} S(\theta)^2 \, f(x \mid \theta)\,dx = \int_{0}^{1} \left[ \frac{1}{1 + \theta} + \ln(x) \right]^2 f(x \mid \theta)\,dx \]
Writing \(c = \frac{1}{1 + \theta}\) for brevity:
\[ = \int_{0}^{1} \left[ c + \ln(x) \right]^2 f(x \mid \theta)\,dx = \int_{0}^{1} \left[ c^2 + 2c\ln(x) + \ln^2(x) \right] f(x \mid \theta)\,dx \]
\[ = c^2 \int_{0}^{1} f(x|\theta)\,dx + 2c \int_{0}^{1} \ln(x) f(x|\theta)\,dx + \int_{0}^{1} \ln^2(x) f(x|\theta)\,dx \\ = I_1 + I_2 + I_3 \]
\[ I_1 \\= c^2 \int_{0}^{1} f(x|\theta)\,dx \\ = c^2 \int_{0}^{1} (1+\theta)x^{\theta} dx \\ = c^2 (1+\theta) \int_{0}^{1} x^{\theta} dx \\ = c^2 (1+\theta) \cdot [\frac{x^{\theta + 1}}{\theta + 1}]_{0}^{1} \\ = c^2 (1+\theta) \cdot \frac{1}{\theta + 1} \\ = c^2 \]
\[ I_2 \\ = 2c \int_{0}^{1} \ln(x) f(x|\theta)\,dx \\ = 2c \int_{0}^{1} \ln(x) (1+\theta) x^{\theta} dx \\ = 2c(1+\theta) \int_{0}^{1} \ln(x) x^{\theta} dx \\ = 2c(1+\theta) \cdot I \]
\[ I = \int \ln(x) \cdot x^a \, dx \quad (\text{with } a = \theta) \\\text{Use integration by parts: } \int u \, dv = uv - \int v \, du \\u = \ln(x), \quad dv = x^a \, dx \\du = \frac{1}{x} \, dx, \quad v = \frac{x^{a+1}}{a+1} \\\Rightarrow I = \ln(x) \cdot \frac{x^{a+1}}{a+1} - \int \frac{x^{a+1}}{a+1} \cdot \frac{1}{x} \, dx \\= \frac{x^{a+1} \ln(x)}{a+1} - \frac{1}{a+1} \int x^{a} \, dx \\= \frac{x^{a+1} \ln(x)}{a+1} - \frac{1}{a+1} \cdot \frac{x^{a+1}}{a+1} \\= \frac{x^{a+1}}{a+1} \left( \ln(x) - \frac{1}{a+1} \right) + C \]
\[ \int_0^1 \ln(x) \cdot x^a \, dx = \left[ \frac{x^{a+1}}{a+1} \left( \ln(x) - \frac{1}{a+1} \right) \right]_0^1 \\= \left( \frac{1^{a+1}}{a+1} \left( \ln(1) - \frac{1}{a+1} \right) \right) - \lim_{x \to 0^+} \left( \frac{x^{a+1}}{a+1} \left( \ln(x) - \frac{1}{a+1} \right) \right) \\= \left( \frac{1}{a+1} (0 - \frac{1}{a+1}) \right) - \lim_{x \to 0^+} \left( \frac{x^{a+1} \ln(x)}{a+1} - \frac{x^{a+1}}{(a+1)^2} \right) \\= -\frac{1}{(a+1)^2} - 0 \quad \text{(since } x^{a+1} \ln x \to 0 \text{ as } x \to 0^+ \text{ for } a > -1) \\= -\frac{1}{(a+1)^2}\\ = -\frac{1}{(\theta+1)^2} \]
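A useful by-product of this integral (worth noting as a sanity check): it gives \(\mathbb{E}[\ln X]\), which confirms that the score has mean zero.
\[ \mathbb{E}[\ln X] = (1+\theta)\int_0^1 \ln(x)\, x^{\theta}\, dx = -\frac{1}{1+\theta}, \qquad \mathbb{E}[S(\theta)] = \frac{1}{1+\theta} + \mathbb{E}[\ln X] = 0 \]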
\[ I_3 \\= \int_{0}^{1} \ln^2(x) f(x|\theta)\,dx \\= \int_{0}^{1} \ln^2(x) \cdot (1+\theta) x^\theta \, dx \\= (1+\theta) \int_{0}^{1} \ln^2(x) \cdot x^\theta \, dx \]
\[ \text{Let } a = \theta, \quad \text{so we compute: } \int_0^1 \ln^2(x) \cdot x^a \, dx \\\text{Use integration by parts: } \int u \, dv = uv - \int v \, du \\\text{Let } u = \ln^2(x), \quad dv = x^a dx \\\Rightarrow du = 2 \ln(x) \cdot \frac{1}{x} dx, \quad v = \frac{x^{a+1}}{a+1} \\\Rightarrow \int \ln^2(x) \cdot x^a \, dx = \frac{x^{a+1}}{a+1} \cdot \ln^2(x) \Big|_0^1 - \int_0^1 \frac{x^{a+1}}{a+1} \cdot 2\ln(x) \cdot \frac{1}{x} dx \\= \left[ \frac{x^{a+1} \ln^2(x)}{a+1} \right]_0^1 - \frac{2}{a+1} \int_0^1 x^a \ln(x) dx \\\text{Now evaluate the boundary term: } \left[ \frac{x^{a+1} \ln^2(x)}{a+1} \right]_0^1 = 0 \\\text{(since both } x^{a+1} \to 0 \text{ and } \ln^2(x) \to \infty \text{ but } x^{a+1} \ln^2(x) \to 0 \text{ as } x \to 0^+ \text{ for } a > -1) \\\text{Now use the earlier result: } \int_0^1 x^a \ln(x) dx = -\frac{1}{(a+1)^2} \\\Rightarrow \int_0^1 \ln^2(x) \cdot x^a dx = 0 - \frac{2}{a+1} \cdot \left(-\frac{1}{(a+1)^2} \right) \\= \frac{2}{(a+1)^3} \\\text{Recall: } I_3 = (1+\theta) \cdot \int_0^1 \ln^2(x) \cdot x^\theta dx = (1+\theta) \cdot \frac{2}{(1+\theta)^3} \\= \frac{2}{(1+\theta)^2} \]
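Both of these integrals are special cases of one general formula, obtained with the substitution \(x = e^{-t}\) and the gamma integral; recorded here as an aside:
\[ \int_0^1 x^{a} (-\ln x)^{k}\, dx = \int_0^{\infty} t^{k} e^{-(a+1)t}\, dt = \frac{k!}{(a+1)^{k+1}}, \qquad a > -1,\ k = 0, 1, 2, \dots \]
The \(k = 1\) and \(k = 2\) cases reproduce the results above.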
Note that \(I_2 = 2c(1+\theta) \cdot I = 2c(1+\theta)\left(-\frac{1}{(1+\theta)^2}\right) = -\frac{2}{(1+\theta)^2}\), since \(c = \frac{1}{1+\theta}\). Therefore:
\[ I_1 + I_2 + I_3 = \frac{1}{(1+\theta)^2} - \frac{2}{(1+\theta)^2} + \frac{2}{(1+\theta)^2} = \frac{1}{(1+\theta)^2} = I(\theta) \]
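As a cross-check on this value (an alternative route not used above), the Fisher information can also be obtained from the second derivative of the per-observation log-likelihood, which is constant in \(x\):
\[ \frac{\partial^2}{\partial\theta^2} \ell(\theta) = -\frac{1}{(1+\theta)^2} \quad\Rightarrow\quad I(\theta) = -\mathbb{E}\left[ \frac{\partial^2}{\partial\theta^2} \ell(\theta) \right] = \frac{1}{(1+\theta)^2} \]
This agrees with the calculation above.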
Suppose a sample of \(n\) independent observations is to be taken and the maximum likelihood estimator will be used to estimate the unknown parameter \(\theta\). Find the asymptotic variance of the MLE and give an approximate 95% confidence interval for \(\theta\) for large \(n\).
Step 1: Recall the Fisher Information from the previous part
\[ I(\theta) = \frac{1}{(1+\theta)^2} \]
Step 2: Asymptotic Distribution of the MLE
For a sample of size \(n\), the MLE \(\hat{\theta}\) is approximately normal:
\[ \hat{\theta} \sim \mathcal{N} \left( \theta, \frac{1}{n I(\theta)} \right) \]
Substitute the Fisher Information:
\[ \text{Var}(\hat{\theta}) \approx \frac{1}{n I(\theta)} = \frac{(1+\theta)^2}{n} \]
Step 3: 95% Confidence Interval
Using \(z_{0.975} = 1.96\), the 95% CI for \(\theta\) is:
\[ \hat{\theta} \pm z_{0.975} \cdot \sqrt{\text{Var}(\hat{\theta})} \\ = \hat{\theta} \pm 1.96 \cdot \sqrt{ \frac{(1+\hat{\theta})^2}{n} } \]
Simplify:
\[ \boxed{ \text{CI}_{0.95} = \hat{\theta} \pm \frac{1.96(1+\hat{\theta})}{\sqrt{n}} } \]
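As a purely illustrative (hypothetical) example: with \(n = 100\) observations and \(\hat{\theta} = 0.5\), the interval would be
\[ 0.5 \pm \frac{1.96 \times 1.5}{\sqrt{100}} = 0.5 \pm 0.294, \quad \text{i.e. } (0.206,\ 0.794) \]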
Now return to the Rayleigh distribution and find the asymptotic variance of its MLE, together with an approximate 95% confidence interval.
Step 1: Recall the MLE
\[
\boxed{
\hat{\theta} = \sqrt{ \frac{1}{2n} \sum_{i=1}^{n} x_i^2 }
}
\]
Step 2: Likelihood and Log-Likelihood
For the Rayleigh distribution, the PDF is: \[ f(x| \theta) = \frac{x}{\theta^2} \exp\left(-\frac{x^2}{2\theta^2}\right), \quad x > 0 \]
The log-likelihood is : \[ \ell(\theta) = \sum_{i=1}^{n} \log(x_i) - 2n \log(\theta) - \frac{1}{2\theta^2} \sum_{i=1}^{n} x_i^2 \]
Step 3: Score Function
Differentiate with respect to \(\theta\): \[ \ell'(\theta) = -\frac{2n}{\theta} + \frac{1}{\theta^3} \sum_{i=1}^{n} x_i^2 \]
Step 4: Fisher Information
Differentiate again: \[ \ell''(\theta) = \frac{2n}{\theta^2} - \frac{3}{\theta^4} \sum_{i=1}^{n} x_i^2 \]
We want to compute the Fisher Information: \[ \mathcal{I}(\theta) = -\mathbb{E} \left[ \ell''(\theta) \right] \]
Now apply the expectation: \[ -\mathbb{E} \left[ \ell''(\theta) \right] = -\mathbb{E} \left[ \frac{2n}{\theta^2} - \frac{3}{\theta^4} \sum_{i=1}^{n} X_i^2 \right] \]
Now apply linearity of expectation:
\[ = -\left( \mathbb{E} \left[ \frac{2n}{\theta^2} \right] - \mathbb{E} \left[ \frac{3}{\theta^4} \sum_{i=1}^{n} X_i^2 \right] \right) \]
Since \(\frac{2n}{\theta^2}\) is a constant:
\[ = -\left( \frac{2n}{\theta^2} - \frac{3}{\theta^4} \sum_{i=1}^{n} \mathbb{E}[X_i^2] \right) \]
\[ \mathbb{E}[X_i^2] = \int_0^{\infty} x^2 \cdot \frac{x}{\theta^2} \exp\left( -\frac{x^2}{2\theta^2} \right) \, dx = \frac{1}{\theta^2} \int_0^{\infty} x^3 \exp\left( -\frac{x^2}{2\theta^2} \right) \, dx \]
Let us use the substitution: \[ u = \frac{x^2}{2\theta^2} \quad \Rightarrow \quad x = \theta \sqrt{2u}, \quad dx = \theta \cdot \frac{1}{\sqrt{2u}} \, du \]
Then, \[ x^3 = (\theta \sqrt{2u})^3 = \theta^3 (2u)^{3/2} \]
Substitute into the integral: \[ \mathbb{E}[X_i^2] = \frac{1}{\theta^2} \int_0^{\infty} \theta^3 (2u)^{3/2} \exp(-u) \cdot \frac{\theta}{\sqrt{2u}} \, du \]
\[ = \frac{\theta^4}{\theta^2} \int_0^{\infty} (2u)^{3/2 - 1/2} \exp(-u) \, du = \theta^2 \int_0^{\infty} 2u \exp(-u) \, du \]
Now apply the gamma function identity: \[ \int_0^{\infty} u^k e^{-u} \, du = \Gamma(k+1) \]
So: \[ \int_0^{\infty} 2u e^{-u} \, du = 2 \cdot \Gamma(2) = 2 \cdot 1! = 2 \]
Therefore: \[ \mathbb{E}[X_i^2] = \theta^2 \cdot 2 = 2\theta^2 \]
\[ \sum_{i=1}^{n} \mathbb{E}[X_i^2] = n \cdot \mathbb{E}[X^2] = n \cdot 2\theta^2 = 2n\theta^2 \]
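Incidentally (a side remark, not needed for the Fisher information), this shows that \(\hat{\theta}^2\) is unbiased for \(\theta^2\):
\[ \mathbb{E}[\hat{\theta}^2] = \mathbb{E}\left[ \frac{1}{2n} \sum_{i=1}^{n} X_i^2 \right] = \frac{2n\theta^2}{2n} = \theta^2 \]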
Putting everything together:
\[ \mathcal{I}(\theta) = -\left( \frac{2n}{\theta^2} - \frac{3}{\theta^4} \sum_{i=1}^{n} \mathbb{E}[X_i^2] \right) = -\left( \frac{2n}{\theta^2} - \frac{3}{\theta^4} \cdot 2n\theta^2 \right) = -\left( \frac{2n}{\theta^2} - \frac{6n\theta^2}{\theta^4} \right) = -\left( \frac{2n}{\theta^2} - \frac{6n}{\theta^2} \right) = -\left( -\frac{4n}{\theta^2} \right) = \frac{4n}{\theta^2} \]
\[ \boxed{ \mathcal{I}(\theta) = \frac{4n}{\theta^2} } \]
We found from Part (a) that the Fisher information is:
\[ \mathcal{I}(\theta) = \frac{4n}{\theta^2} \]
From asymptotic theory, the variance of the MLE is approximately the inverse of the Fisher information:
\[ \operatorname{Var}(\hat{\theta}) \approx \left[ \mathcal{I}(\theta) \right]^{-1} = \frac{\theta^2}{4n} \]
Thus, the asymptotic variance is:
\[ \boxed{ \operatorname{Var}(\hat{\theta}) \approx \frac{\theta^2}{4n} } \]
The MLE \(\hat{\theta}\) is approximately normally distributed:
\[ \hat{\theta} \sim \mathcal{N}\left(\theta, \frac{\theta^2}{4n} \right) \]
The standard error is:
\[ \text{SE}(\hat{\theta}) = \sqrt{ \frac{\theta^2}{4n} } = \frac{\theta}{2\sqrt{n}} \]
Using the normal approximation and plugging in \(\hat{\theta}\) for \(\theta\), the 95% confidence interval is:
\[ \hat{\theta} \pm 1.96 \cdot \frac{\hat{\theta}}{2\sqrt{n}} = \hat{\theta} \left( 1 \pm \frac{0.98}{\sqrt{n}} \right) \]
\[ \boxed{ \text{95% CI} \approx \hat{\theta} \left( 1 \pm \frac{0.98}{\sqrt{n}} \right) } \]
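As a purely illustrative (hypothetical) example: with \(n = 100\) and \(\hat{\theta} = 2\), the interval would be
\[ 2 \left( 1 \pm \frac{0.98}{\sqrt{100}} \right) = 2(1 \pm 0.098), \quad \text{i.e. } (1.804,\ 2.196) \]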