$$ \newcommand{\defeq}{\stackrel{\small\bullet}{=}} \newcommand{\ra}{\rangle} \newcommand{\la}{\langle} \newcommand{\norm}[1]{\left\|#1\right\|} \newcommand{\abs}[1]{\left\lvert#1\right\rvert} \newcommand{\Abs}[1]{\Bigl\lvert#1\Bigr\rvert} \newcommand{\pr}{{\mathbb P}} \newcommand{\qr}{{\mathbb Q}} \newcommand{\xv}{{\boldsymbol{x}}} \newcommand{\av}{{\boldsymbol{a}}} \newcommand{\bv}{{\boldsymbol{b}}} \newcommand{\cv}{{\boldsymbol{c}}} \newcommand{\dv}{{\boldsymbol{d}}} \newcommand{\ev}{{\boldsymbol{e}}} \newcommand{\fv}{{\boldsymbol{f}}} \newcommand{\gv}{{\boldsymbol{g}}} \newcommand{\hv}{{\boldsymbol{h}}} \newcommand{\nv}{{\boldsymbol{n}}} \newcommand{\sv}{{\boldsymbol{s}}} \newcommand{\tv}{{\boldsymbol{t}}} \newcommand{\uv}{{\boldsymbol{u}}} \newcommand{\vv}{{\boldsymbol{v}}} \newcommand{\wv}{{\boldsymbol{w}}} \newcommand{\zerov}{{\mathbf{0}}} \newcommand{\onev}{{\mathbf{0}}} \newcommand{\phiv}{{\boldsymbol{\phi}}} \newcommand{\cc}{{\check{C}}} \newcommand{\xv}{{\boldsymbol{x}}} \newcommand{\Xv}{{\boldsymbol{X}\!}} \newcommand{\yv}{{\boldsymbol{y}}} \newcommand{\Yv}{{\boldsymbol{Y}}} \newcommand{\zv}{{\boldsymbol{z}}} \newcommand{\Zv}{{\boldsymbol{Z}}} \newcommand{\Iv}{{\boldsymbol{I}}} \newcommand{\Jv}{{\boldsymbol{J}}} \newcommand{\Cv}{{\boldsymbol{C}}} \newcommand{\Ev}{{\boldsymbol{E}}} \newcommand{\Fv}{{\boldsymbol{F}}} \newcommand{\Gv}{{\boldsymbol{G}}} \newcommand{\Hv}{{\boldsymbol{H}}} \newcommand{\alphav}{{\boldsymbol{\alpha}}} \newcommand{\epsilonv}{{\boldsymbol{\epsilon}}} \newcommand{\betav}{{\boldsymbol{\beta}}} \newcommand{\deltav}{{\boldsymbol{\delta}}} \newcommand{\gammav}{{\boldsymbol{\gamma}}} \newcommand{\etav}{{\boldsymbol{\eta}}} \newcommand{\piv}{{\boldsymbol{\pi}}} \newcommand{\thetav}{{\boldsymbol{\theta}}} \newcommand{\tauv}{{\boldsymbol{\tau}}} \newcommand{\muv}{{\boldsymbol{\mu}}} \newcommand{\phiinv}{\Phi^{-1}} \newcommand{\Fiinv}{F^{-1}} \newcommand{\giinv}{g^{-1}} \newcommand{\fhat}{\hat{f}} \newcommand{\ghat}{\hat{g}} 
\newcommand{\ftheta}{f_\theta} \newcommand{\fthetav}{f_{\thetav}} \newcommand{\gtheta}{g_\theta} \newcommand{\gthetav}{g_{\thetav}} \newcommand{\ztheta}{Z_\theta} \newcommand{\xtheta}{\Xv_\theta} \newcommand{\ytheta}{\Yv_\theta} \newcommand{\p}{\partial} \newcommand{\f}{\frac} \newcommand{\cf}{\cfrac} \newcommand{\e}{\epsilon} \newcommand{\indep}{\perp\kern-5pt \perp} \newcommand{\inner}[1]{\langle#1\rangle} \newcommand{\pa}[1]{\left(#1\right)} \newcommand{\pb}[1]{\left\{#1\right\}} \newcommand{\pc}[1]{\left[#1\right]} \newcommand{\pA}[1]{\Big(#1\Big)} \newcommand{\pB}[1]{\Big\{#1\Big\}} \newcommand{\pC}[1]{\Big[#1\Big]} \newcommand{\ty}[1]{\texttt{#1}} \newcommand{\borel}[1]{\mathscr{B}\pa{#1}} \newcommand{\scr}{\mathcal} \newcommand{\scrb}{\mathscr} \newcommand{\argmin}{\mathop{\text{arg}\ \!\text{min}}} \newcommand{\arginf}{\mathop{\text{arg}\ \!\text{inf}}} \newcommand{\argmax}{\mathop{\text{arg}\ \!\text{max}}} \newcommand{\argsup}{\mathop{\text{arg}\ \!\text{sup}}} \newcommand{\bigo}[1]{\mathcal{O}_{p}\!\left(#1\right)} \newcommand{\f}{\frac} \newcommand{\e}{\epsilon} \newcommand{\inv}{^{-1}} \newcommand{\phiinv}{\Phi^{-1}} \newcommand{\Fiinv}{F^{-1}} \newcommand{\giinv}{g^{-1}} \newcommand{\fhat}{\hat{f}} \newcommand{\ghat}{\hat{g}} \newcommand{\ftheta}{f_\theta} \newcommand{\fthetav}{f_{\thetav}} \newcommand{\gtheta}{g_\theta} \newcommand{\gthetav}{g_{\thetav}} \newcommand{\ztheta}{Z_\theta} \newcommand{\xtheta}{\Xv_\theta} \newcommand{\ytheta}{\Yv_\theta} \newcommand{\absdet}[1]{\abs{\det\pa{#1}}} \newcommand{\jac}[1]{\Jv_{#1}} \newcommand{\absdetjx}[1]{\abs{\det\pa{\Jv_{#1}}}} \newcommand{\absdetj}[1]{\norm{\Jv_{#1}}} \newcommand{\sint}{sin(\theta)} \newcommand{\cost}{cos(\theta)} \newcommand{\sor}[1]{S\mathcal{O}(#1)} \newcommand{\ort}[1]{\mathcal{O}(#1)} \newcommand{\A}{{\mathcal A}} \newcommand{\C}{{\mathbb C}} \newcommand{\E}{{\mathbb E}} \newcommand{\F}{{\mathcal{F}}} \newcommand{\N}{{\mathbb N}} \newcommand{\R}{{\mathbb R}} \newcommand{\Q}{{\mathbb 
Q}} \newcommand{\Z}{{\mathbb Z}} \newcommand{\X}{{\mathbb{X}}} \newcommand{\Y}{{\mathbb{Y}}} \newcommand{\G}{{\mathcal{G}}} \newcommand{\M}{{\mathcal{M}}} \newcommand{\betaequivalent}{\beta\text{-equivalent}} \newcommand{\betaequivalence}{\beta\text{-equivalence}} \newcommand{\Mb}{{\boldsymbol{\mathsf{M}}}} \newcommand{\Br}{{\mathbf{\mathsf{Bar}}}} \newcommand{\dgm}{{\mathfrak{Dgm}}} \newcommand{\Db}{{\mathbf{\mathsf{D}}}} \newcommand{\Img}{{\mathbf{\mathsf{Img}}}} \newcommand{\mmd}{{\mathbf{\mathsf{MMD}}}} \newcommand{\Xn}{{\mathbb{X}_n}} \newcommand{\Xm}{{\mathbb{X}_m}} \newcommand{\Yn}{{\mathbb{Y}_n}} \newcommand{\Ym}{Y_1, Y_2, \cdots, Y_m} \newcommand{\Xb}{{\mathbb{X}}} \newcommand{\Yb}{{\mathbb{Y}}} \newcommand{\s}{{{\sigma}}} \newcommand{\fnsbar}{{\bar{f}^n_\s}} \newcommand{\fns}{{f^n_\s}} \newcommand{\fs}{{f_\s}} \newcommand{\fsbar}{{\bar{f}_\s}} \newcommand{\barfn}{{{f}^n_\sigma}} \newcommand{\barfnm}{{{f}^{n+m}_\sigma}} \newcommand{\barfo}{{{f}_\sigma}} \newcommand{\fn}{{f^n_{\rho,\sigma}}} \newcommand{\fnm}{{f^{n+m}_{\rho,\sigma}}} \newcommand{\fo}{{f_{\rho,\sigma}}} \newcommand{\K}{{{K_{\sigma}}}} \newcommand{\barpn}{{\bar{p}^n_\sigma}} \newcommand{\barpo}{{\bar{p}_\sigma}} \newcommand{\pn}{{p^n_\sigma}} \newcommand{\po}{{p_\sigma}} \newcommand{\J}{{\mathcal{J}}} \newcommand{\B}{{\mathcal{B}}} \newcommand{\pt}{{\tilde{\mathbb{P}}}} \newcommand{\Winf}{{W_{\infty}}} \newcommand{\winf}{{W_{\infty}}} \newcommand{\HH}{{{\scr{H}_{\sigma}}}} \newcommand{\D}{{{\scr{D}_{\sigma}}}} \newcommand{\Ts}{{T_{\sigma}}} \newcommand{\Phis}{{\Phi_{\sigma}}} \newcommand{\nus}{{\nu_{\sigma}}} \newcommand{\Qs}{{\mathcal{Q}_{\sigma}}} \newcommand{\ws}{{w_{\sigma}}} \newcommand{\vs}{{v_{\sigma}}} \newcommand{\ds}{{\delta_{\sigma}}} \newcommand{\fp}{{f_{\pr}}} \newcommand{\prs}{{\widetilde{\pr}_{\sigma}}} \newcommand{\qrs}{{\widetilde{\qr}_{\sigma}}} \newcommand{\Inner}[1]{\Bigl\langle#1\Bigr\rangle} \newcommand{\innerh}[1]{\langle#1\rangle_{\HH}} 
\newcommand{\Innerh}[1]{\Bigl\langle#1\Bigr\rangle_{\HH}} \newcommand{\normh}[1]{\norm{#1}_{\HH}} \newcommand{\norminf}[1]{\norm{#1}_{\infty}} \newcommand{\gdelta}{{\G_{\delta}}} \newcommand{\supgdelta}{{\sup\limits_{g\in\gdelta}\abs{\Delta_n(g)}}} \newcommand{\id}{\text{id}} \newcommand{\supp}{\text{supp}} \newcommand{\cech}{\v{C}ech} \newcommand{\Zz}{{\scr{Z}}} \newcommand{\psis}{\psi_\s} \newcommand{\phigox}{\Phis(\xv)-g} \newcommand{\phigoy}{\Phis(\yv)-g} \newcommand{\fox}{{f^{\epsilon,{\xv}}_{\rho,\sigma}}} \newcommand{\prx}{{\pr^{\epsilon}_{\xv}}} \newcommand{\pro}{{\pr_0}} \newcommand{\dotfo}{\dot{f}_{\!\!\rho,\s}} \newcommand{\phifo}{{\Phis(\yv)-\fo}} \newcommand{\phifox}{{\Phis(\xv)-\fo}} \newcommand{\kinf}{{\norm{\K}_{\infty}}} \newcommand{\half}{{{\f{1}{2}}}} \newcommand{\Jx}{\J_{\epsilon,{\xv}}} \newcommand{\dpy}{\text{differential privacy}} \newcommand{\edpy}{$\epsilon$--\text{differential privacy}} \newcommand{\eedpy}{$\epsilon$--edge \text{differential privacy}} \newcommand{\dpe}{\text{differentially private}} \newcommand{\edpe}{$\epsilon$--\text{differentially private}} \newcommand{\eedpe}{$\epsilon$--edge \text{differentially private}} \newcommand{\er}{Erdős-Rényi} \newcommand{\krein}{Kreĭn} % \newcommand{\grdpg}{\mathsf{gRDPG}} % \newcommand{\rdpg}{\mathsf{RDPG}} % \newcommand{\eflip}{{\textsf{edgeFlip}}} % \newcommand{\grdpg}{\text{gRDPG}} % \newcommand{\rdpg}{\text{RDPG}} \newcommand{\grdpg}{\mathsf{gRDPG}} \newcommand{\rdpg}{\mathsf{RDPG}} \newcommand{\eflip}{{\text{edgeFlip}}} \newcommand{\I}{{\mathbb I}} \renewcommand{\pa}[1]{\left(#1\right)} \renewcommand{\pb}[1]{\left\{#1\right\}} \renewcommand{\pc}[1]{\left[#1\right]} \renewcommand{\V}{\mathbb{V}} \renewcommand{\W}{\mathbb{W}} %%%%%%%%%%%%%%%%%%%%%%%%%%% \providecommand{\fd}{\frac 1d} % \renewcommand{\fpp}{{\frac 1p}} \providecommand{\pfac}{\f{p}{p-1}} \providecommand{\ipfac}{\f{p-1}{p}} \providecommand{\dbq}{\Delta b_{n,m,Q}\qty(\qty{\xvo})} \providecommand{\db}{\Delta 
b_{n,m}\qty(\qty{\xvo})} \providecommand{\bbv}{{{\mathbb{V}}}} \providecommand{\bbw}{{{\mathbb{W}}}} \providecommand{\md}{\textsf{MoM Dist}} \providecommand{\bF}{{\mathbf{F}}} \providecommand{\sub}{{\text{Sub}}} \providecommand{\samp}{\text{$\pa{\scr{S}}$}} \providecommand{\tp}{{2^{\f{p-1}{p}}}} %%%%%%%%%%%%%%%%%%%%%%%%%% \providecommand{\Xmn}{{\mathbb{X}_{n+m}}} \newcommand{\Dnmq}{\D[n+m, Q]} \newcommand{\Dnmh}{\D[n+m, \H]} \newcommand{\Dn}{\D[n]} \providecommand{\xvo}{\xv_0} \providecommand{\bn}[1][\null]{b^{#1}_{n}\pa{\pb{\xvo}}} \providecommand{\bnm}[1][\null]{b^{#1}_{n+m}\pa{\pb{\xvo}}} \providecommand{\bnq}[1][\null]{b^{#1}_{n,Q}\pa{\pb{\xvo}}} \providecommand{\bnmq}[1][\null]{b^{#1}_{n+m,Q}\pa{\pb{\xvo}}}\providecommand{\prq}{\pr_q} \providecommand{\dxvo}{{\delta_{\xvo}}} \providecommand{\sq}{S_q} \providecommand{\Sq}{\abs{S_q}} \providecommand{\no}{{n_o}} \providecommand{\mmdn}{\mmd\pa{\pr_n, \delta_{\xvo}}} \newcommand{\rqt}{\xi_{q}(t; n, Q)} \providecommand{\nq}{\f{n}{Q}} \providecommand{\Ot}{\Omega(t, n/Q)} \providecommand{\ut}[1]{U^{#1}} \providecommand{\vt}[1]{V^{#1}} \providecommand{\wt}[1]{W^{#1}} \providecommand{\but}[1]{\mathbb{U}^{#1}} \providecommand{\bvt}[1]{\mathbb{V}^{#1}} \providecommand{\bwt}[1]{\mathbb{W}^{#1}} \providecommand{\ball}[1]{B_{f\!, \rho}\pa{#1}} \newcommand*{\medcap}{\mathbin{\scalebox{0.75}{{\bigcap}}}}% \newcommand*{\medcup}{\mathbin{\scalebox{0.75}{{\bigcup}}}}% \providecommand{\dsf}{\mathsf{d}} \newcommand{\Dnh}{{\mathsf{D}_{n,\scr{H}}}} \newcommand{\Dph}{{\mathsf{D}_{\pr,\scr{H}}}} \newcommand{\D}[1][1={ },usedefault]{{\mathsf{D}_{#1}}} \newcommand{\Dnq}{{\mathsf{D}_{n, Q}}} \newcommand{\dnq}{{\mathsf{d}_{n, Q}}} \newcommand{\dn}{{\mathsf{d}_{n}}} \newcommand{\dnm}{{\mathsf{d}_{n-m}}} \newcommand{\dmn}{{\mathsf{d}_{n+m}}} \newcommand{\dx}{{\mathsf{d}_{\mathbb{X}}}} \providecommand{\med}{\text{median}} \providecommand{\median}{\text{median}} \providecommand{\Xnm}{{\mathbb{X}^*_{n-m}}} $$

Week-5

Math 183 • Statistical Methods • Spring 2026

Siddharth Vishwanath

Learning objectives

$$ % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%% % %%%%%%%%%%%%%%%%%%%%%%%%%% % %

% % \providecommand{}{p_{0}} \providecommand{}{p_{1}} \providecommand{}{p_{2}} \providecommand{}{p_{12}} \providecommand{1n}{p_{1n}} % % % % $$

  • Algebra of Random Variables
  • Law of Large Numbers
  • Central Limit Theorem
  • Normal approximation to Binomial
  • Normal Approximation to Poisson
  • Sampling Distributions

Algebra of Random Variables

Binomial Distribution

Sums of Binomial Random Variables

Let \(X \sim {\text{Bin}}(n, p)\), \(Y \sim {\text{Bin}}(m, p)\) and \(X \perp\kern-5pt \perp Y\), then \[ X + Y \sim {\text{Bin}}(n + m, p) \]

Sums of \(n\) \(iid\) Binomial Random Variables

Let \(X_1, X_2, \dots, X_n{\stackrel{iid}{\sim}}{\text{Bin}}(k, p)\) be \(iid\) Binomial random variables, then \[ n\overline{X}= X_1 + X_2 + \dots + X_n \sim {\text{Bin}}(n \cdot k, p) \]

Poisson Distribution

Sums of Poisson Random Variables

Let \(X \sim {\text{Poi}}(\lambda)\), \(Y \sim {\text{Poi}}(\gamma)\) and \(X \perp\kern-5pt \perp Y\), then \[ X + Y \sim {\text{Poi}}(\lambda + \gamma) \]

Sums of \(n\) \(iid\) Poisson Random Variables

Let \(X_1, X_2, \dots, X_n{\stackrel{iid}{\sim}}{\text{Poi}}(\lambda)\) be \(iid\) Poisson random variables, then \[ n \cdot \overline{X}= X_1 + X_2 + \dots + X_n \sim {\text{Poi}}(n \cdot \lambda) \]

Chi-squared Distribution

Sums of Chi-Squared Random Variables

Let \(X \sim {\chi^2}(d)\), \(Y \sim {\chi^2}(k)\) and \(X \perp\kern-5pt \perp Y\), then \[ X + Y \sim {\chi^2}(d+k) \]

Sums of \(n\) \(iid\) Chi-Squared Random Variables

Let \(X_1, X_2, \dots, X_n{\stackrel{iid}{\sim}}{\chi^2}(d)\) be \(iid\) Chi-Squared random variables, then \[ n \cdot \overline{X}= X_1 + X_2 + \dots + X_n \sim {\chi^2}(n \cdot d) \]

Normal Distribution

Sums of Normal Random Variables

Let \(X \sim N(\mu_X, \sigma_X^2)\), \(Y \sim N(\mu_Y, \sigma_Y^2)\) and \(X \perp\kern-5pt \perp Y\), then for all \(a, b \in {\mathbb R}\) \[ aX + bY \sim N\Big(a \mu_X + b \mu_Y, \ \ \ a^2 \sigma_X^2 + b^2 \sigma_Y^2\Big) \]

What happens when \(b=0\)? What happens when \(Y = 1\) is constant (i.e., not random)?

Sums of \(n\) \(iid\) Normal Random Variables

Let \(X_1, X_2, \dots, X_n{\stackrel{iid}{\sim}}N(\mu, \sigma^2)\) be \(iid\) Normal random variables, then \[ \overline{X}= \frac{X_1 + X_2 + \dots + X_n}{n} \sim N\Big(\mu, \frac{\sigma^2}{n}\Big) \]

Standardizing

Standardizing a Normal Random Variable

Let \(X \sim N(\mu, \sigma^2)\) be a Normal random variable with mean \(\mu\) and variance \(\sigma^2\) \[ \frac{X - \mu}{\sigma} \sim N(0, 1) \equiv Z \]

Why?

\[\begin{aligned} X &\sim N(\mu, \sigma^2)\\ X - \mu &\sim N(0, \sigma^2)\\ \sigma^{-1}(X - \mu) &\sim N(0, \sigma^2 / \sigma^2) = N(0, 1)\\ \end{aligned}\]

Student’s \(t\)-distribution

Student’s \(t\)-distribution

A random variable \(X \sim t(d)\) is said to follow the Student’s \(t\)-distribution with \(d\) degrees of freedom if \[ X = \frac{Z}{\sqrt{Y/d}} \qquad \text{where } Z \sim N(0, 1), \quad Y \sim {\chi^2}(d) \text{ and } Z \perp\kern-5pt \perp Y. \]

Law of Large Numbers

Let \(X_1, X_2, \dots, X_n\) be \(iid\) random variables with:

  • Expected value \({\mathbb E}(X)\)

Then

\[ \overline{X}= \frac{X_1 + X_2 + \dots + X_n}{n} \approx {\mathbb E}(X) \qquad \text{as } n \rightarrow \infty. \]

#| standalone: true
#| viewerHeight: 600
#| components: viewer
#| layout: vertical

import numpy as np
import scipy
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from shiny import App, render, ui
from matplotlib.patches import Patch
import statsmodels.api as sm
from ipywidgets import interact, fixed


# Generate a random sample

# Define the UI
# UI for the LLN demo: a sidebar of controls (per-experiment sample size,
# distribution choice, distribution-specific parameters, number of
# experiments) alongside the output plot rendered by server().
app_ui = ui.page_fluid(
    ui.layout_sidebar(
        ui.sidebar(
            # n: number of draws per experiment (read as input.n()).
            ui.input_slider(
                "n",
                "n",
                min = 1,
                max = 100,
                value = 1,
                step = 1,
                ticks=True,
                animate=False
            ),
            # Distribution to sample from; input.dist drives both the
            # conditional parameter panels below and the branch in plots().
            ui.input_radio_buttons(
                "dist",
                "Distribution",
                choices=[
                    "Bernoulli",
                    "Uniform",
                    "Exponential",
                    "Normal",
                ],
                inline=False
            ),
            # Bernoulli success probability p, kept strictly inside (0, 1).
            ui.panel_conditional(
                "input.dist == 'Bernoulli'", 
                ui.input_slider(
                    "p", 
                    "p",
                    min = 1e-1, max=1-1e-1, value=0.5, step=1e-1
                )
            ),
            # Uniform(a, b) endpoints (a <= 0 <= b by the slider ranges).
            ui.panel_conditional(
                "input.dist == 'Uniform'", 
                ui.input_slider(
                    "a", 
                    "a",
                    min = -5, max=0, value=-1, step=0.5
                ),
                ui.input_slider(
                    "b", 
                    "b",
                    min = 0, max=5, value=1, step=0.5
                )
            ),
            # Exponential rate lambda; server() converts to scale = 1/lambda.
            ui.panel_conditional(
                "input.dist == 'Exponential'", 
                ui.input_slider(
                    "lam", 
                    "lambda",
                    min = 1e-1, max=2-1e-1, value=1.0, step=1e-1
                )
            ),
            # Normal mean and standard deviation.
            ui.panel_conditional(
                "input.dist == 'Normal'", 
                ui.input_slider(
                    "mu",
                    "mu",
                    min = -2.0, max=2.0, value=0.0, step=5e-1
                ),
                ui.input_slider(
                    "sigma", 
                    "sigma",
                    min = 0.5, max=2.5, value=0.5, step=0.5
                )
            ),
        # n_exp: how many independent experiments to overlay on the plots.
        ui.input_slider(
            "n_exp",
            "n_exp",
            min = 1,
            max = 10,
            value = 1,
            step = 1,
            ticks=True,
            animate=False
        ),
        ),
        ui.output_plot("plots", height="500px")
    )
)

# Define the server logic
def server(input, output, session):
    @output
    @render.plot
    def plots():
        """Law of Large Numbers demo.

        Left panel: dot-histogram of each experiment's n draws.
        Right panel: running average of the draws for each experiment,
        with E[X] shown as a dashed red reference line.
        """
        fig, ax = plt.subplots(1, 2, figsize=(12, 5))

        n = input.n()
        # Frozen scipy distribution chosen in the sidebar, plus shared
        # x-axis limits for both panels.  The radio buttons guarantee
        # input.dist() is one of these four choices.
        if input.dist() == "Bernoulli":
            p = input.p()
            X = stats.bernoulli(p)
            zlim = np.array([-0.5, 1.5])
        elif input.dist() == "Uniform":
            a, b = input.a(), input.b()
            X = stats.uniform(loc=a, scale=b-a)
            zlim = np.array([-5, 5])
        elif input.dist() == "Exponential":
            lam = input.lam()
            # scipy parameterizes the exponential by scale = 1 / rate.
            X = stats.expon(0, 1 / lam)
            zlim = np.array([-0.5, 3])
        elif input.dist() == "Normal":
            mu, sigma = input.mu(), input.sigma()
            X = stats.norm(mu, sigma)
            zlim = np.array([-4, 4])

        np.random.seed(2024)  # fixed seed so the display is reproducible
        for i in range(input.n_exp()):
            Xs = X.rvs(n)
            hist, edges = np.histogram(Xs, bins=20)
            # Convert the histogram into (x, y) dot coordinates: one dot
            # per observation, stacked vertically within each bin.
            ys = np.array([])
            xs = np.array([])
            # FIX: this inner loop previously reused `i`, shadowing the
            # experiment index above.
            for j in range(len(hist)):
                ys = np.append(ys, np.arange(hist[j]))
                xs = np.append(xs, np.repeat(edges[j], hist[j]))
            ax[0].scatter(xs, ys, alpha=max(0.2, 1 / input.n_exp()))
            ax[0].set_ylim([-0.5, 20])
            ax[0].set_xlim(zlim)
            # Running average \bar{X}_k for k = 1..n (LLN: converges to E[X]).
            average_X = np.cumsum(Xs) / np.arange(1, n + 1)
            ax[1].plot(range(1, n + 1), average_X)
            ax[1].axhline(y=X.mean(), color='red', linestyle='--')
            ax[1].set_ylim(zlim)
            ax[1].set_xlim([1, 100])
            ax[1].set_xlabel('Number of trials')
            ax[1].set_ylabel('Average')
            ax[1].set_title(f'Average of {n} samples from {X.dist.name}{X.args}')
        # Lay out once after all experiments are drawn (was called every
        # iteration, which was redundant).
        plt.tight_layout()
        return fig

# Create the Shiny app
app = App(app_ui, server)
app  # bare final expression so shinylive/Quarto displays the app object

Central Limit Theorem

Let \(X_1, X_2, \dots, X_n\) be \(iid\) random variables with:

  • Expected value \({\mathbb E}(X)\), AND
  • Variance \({\text{Var}}(X)\)

Then

\[ \frac{\overline{X}- {\mathbb E}(X)}{\sqrt{{{\text{Var}}(X)}/{n}}} \approx N(0,1) \qquad \text{as } n \rightarrow \infty. \]

Equivalently,

\[ \sqrt{n}\Bigg(\frac{\overline{X}- {\mathbb E}(X)}{{{\text{SD}(X)}}}\Bigg) \approx N(0,1) \qquad \text{as } n \rightarrow \infty. \]

#| standalone: true
#| viewerHeight: 600
#| components: viewer
#| layout: vertical

import numpy as np
import scipy
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from shiny import App, render, ui
from matplotlib.patches import Patch
import statsmodels.api as sm
from ipywidgets import interact, fixed
import seaborn as sns


# Generate a random sample

# Define the UI
# UI for the CLT demo: same sidebar controls as the LLN app, minus the
# n_exp slider (here n doubles as the number of experiments).
app_ui = ui.page_fluid(
    ui.layout_sidebar(
        ui.sidebar(
            # n: sample size per experiment and number of experiments.
            ui.input_slider(
                "n",
                "n",
                min = 10,
                max = 500,
                # FIX: initial value was 1, below the slider minimum of 10.
                value = 10,
                step = 1,
                ticks=True,
                animate=False
            ),
            # Distribution to sample from; drives the conditional panels
            # below and the branch in plots().
            ui.input_radio_buttons(
                "dist",
                "Distribution",
                choices=[
                    "Bernoulli",
                    "Uniform",
                    "Exponential",
                    "Normal",
                ],
                inline=False
            ),
            # Bernoulli success probability p, kept strictly inside (0, 1).
            ui.panel_conditional(
                "input.dist == 'Bernoulli'", 
                ui.input_slider(
                    "p", 
                    "p",
                    min = 1e-1, max=1-1e-1, value=0.5, step=1e-1
                )
            ),
            # Uniform(a, b) endpoints.
            ui.panel_conditional(
                "input.dist == 'Uniform'", 
                ui.input_slider(
                    "a", 
                    "a",
                    min = -5, max=0, value=-1, step=0.5
                ),
                ui.input_slider(
                    "b", 
                    "b",
                    min = 0, max=5, value=1, step=0.5
                )
            ),
            # Exponential rate lambda; server() converts to scale = 1/lambda.
            ui.panel_conditional(
                "input.dist == 'Exponential'", 
                ui.input_slider(
                    "lam", 
                    "lambda",
                    min = 1e-1, max=2-1e-1, value=1.0, step=1e-1
                )
            ),
            # Normal mean and standard deviation.
            ui.panel_conditional(
                "input.dist == 'Normal'", 
                ui.input_slider(
                    "mu",
                    "mu",
                    min = -2.0, max=2.0, value=0.0, step=5e-1
                ),
                ui.input_slider(
                    "sigma", 
                    "sigma",
                    min = 0.5, max=2.5, value=0.5, step=0.5
                )
            ),
        ),
        ui.output_plot("plots", height="500px")
    )
)

# Define the server logic
def server(input, output, session):
    @output
    @render.plot
    def plots():
        """Central Limit Theorem demo.

        Left panel: histogram of the standardized sample means
        sqrt(n) * (Xbar - E[X]) / SD(X) over n repeated experiments,
        overlaid with the N(0, 1) density.
        Right panel: empirical CDF of those standardized means against
        the N(0, 1) CDF.
        """
        fig, ax = plt.subplots(1, 2, figsize=(12, 5))

        n = input.n()
        # Frozen scipy distribution chosen in the sidebar, plus x-axis
        # limits.  The radio buttons guarantee one branch is taken.
        if input.dist() == "Bernoulli":
            p = input.p()
            X = stats.bernoulli(p)
            zlim = np.array([-0.5, 1.5])
        elif input.dist() == "Uniform":
            a, b = input.a(), input.b()
            X = stats.uniform(loc=a, scale=b-a)
            zlim = np.array([-5, 5])
        elif input.dist() == "Exponential":
            lam = input.lam()
            # scipy parameterizes the exponential by scale = 1 / rate.
            X = stats.expon(0, 1 / lam)
            zlim = np.array([-0.5, 3])
        elif input.dist() == "Normal":
            mu, sigma = input.mu(), input.sigma()
            X = stats.norm(mu, sigma)
            zlim = np.array([-4, 4])

        np.random.seed(2024)  # fixed seed so the display is reproducible
        # One standardized sample mean per experiment: n experiments,
        # each averaging n fresh draws from X.
        Xn = np.array([(X.rvs(n).mean() - X.mean()) / (X.std() / np.sqrt(n)) for _ in range(n)])
        ax[0].hist(Xn, bins=20, density=True, alpha=0.6, color='dodgerblue')
        xlim = ax[0].get_xlim()
        xs = np.linspace(*xlim, 100)
        ax[0].plot(xs, stats.norm.pdf(xs), color='red', linestyle='--')
        ax[0].set_title(f'{n} experiments sampling from {X.dist.name}{X.args}')
        ax[0].set_ylim([0, 0.5])

        # Empirical CDF F_n(x) = #{X_i <= x} / n.
        # FIX: previously used strict `<`, which gives the left-continuous
        # variant; the ECDF is defined with `<=`.
        ecdf = lambda x: np.sum(Xn <= x) / n
        ax[1].plot(xs, stats.norm.cdf(xs), color='red', linestyle='--')
        ax[1].plot(xs, [ecdf(x) for x in xs])
        plt.tight_layout()
        return fig

# Create the Shiny app
app = App(app_ui, server)
app  # bare final expression so shinylive/Quarto displays the app object

CLT vs LLN

Let \(X_1, X_2, \dots, X_n\) be \(iid\) with expected value \({\mathbb E}(X)\), AND variance \({\text{Var}}(X)\)

Law of Large Numbers

\[ \overline{X}- {\mathbb E}(X) \rightarrow 0. \]

Central Limit Theorem

\[ \sqrt{n}\Bigg(\frac{\overline{X}- {\mathbb E}(X)}{\text{SD}(X)}\Bigg) \Rightarrow N(0, 1). \]

Normal Approx. of Binomial & Poisson

Binomial-Normal Approximation (w/o continuity correction)

Let \(X \sim {\text{Bin}}(n, p)\). For large \(n\) \[ X \approx N\Big(np, \ \ \ np \cdot (1-p)\Big) \]

Poisson-Normal Approximation (w/o continuity correction)

Let \(X \sim {\text{Poi}}(\lambda)\). For large \(\lambda\) \[ X \approx N\Big(\lambda, \ \ \lambda\Big) \]