Measures.tex
\section{Passing between probability measures} \label{sec:measures}
The goal of this section is to work out bounds for the error arising when passing back and forth between $\unif_k$ and $\ens{k}$, as described in Section~\ref{sec:outline-dist}. Lemma~\ref{lem:distributions} below gives the bounds we need. The reader will not lose much by reading just its statement; the proof consists only of technical calculations.
Before stating Lemma~\ref{lem:distributions} we need some definitions.
\ignore{ \begin{definition} Given a set $A \subseteq [k]^n$ and a restriction $(J,x_\barJ)$, we write $A_{x_\barJ}$ for the subset of $[k]^{J}$ defined by $A_{x_\barJ} = \{y \in [k]^J : (x_{\barJ}, y_J) \in A\}$. \end{definition}}
\begin{definition} \label{def:r4r} For $0 \leq \ppn \leq 1$, we say that $J$ is a \emph{$\ppn$-random subset} of $[n]$ if $J$ is formed by including each coordinate $i \in [n]$ independently with probability $\ppn$. Assuming $r \leq n/2$, we say that $J$ is an \emph{$[r,4r]$-random subset} of $[n]$ if $J$ is a $\ppn$-random subset of $[n]$ conditioned on $r \leq \abs{J} \leq 4r$, with $\ppn = 2r/n$. \end{definition} \begin{definition} A \emph{distribution family} $(\distra^m)_{m \in \N}$ (over $[k]$) is a sequence of probability distributions, where $\distra^m$ is a distribution on $[k]^m$. In this paper the families we consider will either be the equal-(nondegenerate-)slices family $\distra^m = \ens{k}^m$ or $\distra^m = \eqs{k}^m$, or will be the product distributions based on a single distribution $\prd$ on $[k]$, $\distra^m = \prd^{\otimes m}$. \end{definition}
\begin{lemma} \label{lem:distributions} Let $(\distra^m)$ and $(\distrb^m)$ be distribution families. Assume $2 \ln n \leq r \leq n/2$. Let $J$ be an $[r,4r]$-random subset of $[n]$, let $x$ be drawn from $[k]^{\barJ}$ according to $\distra^{\abs{\barJ}}$, and let $y$ be drawn from $[k]^J$ according to $\distrb^{\abs{J}}$. The resulting distribution on the composite string $(x,y) \in [k]^n$ has total variation distance from $\distra^n$ which can be bounded as follows:
\begin{enumerate}
\item (Product to equal-slices.) \label{eqn:distrs-prd-eqs} If $\distra^m = \prd^{\otimes m}$ and $\distrb^m = \eqs{\ell}^m$ for $\ell \leq k$, the bound is \noteryan{You know, we only need this result for the uniform distribution, in which case we can bound the below by the simpler $2k \cdot r/\sqrt{n}$.}
\[
(2{\textstyle \sqrt{\frac{1}{\min(\prd)}-1}}+2) \cdot r / \sqrt{n}.
\]
\item (Equal-slices to product.) \label{eqn:distrs-eqs-prd} If $\distra^m = \eqs{k}^m$ and $\distrb^m = \prd^{\otimes m}$, the bound is $4k \cdot r/\sqrt{n}$, independent of $\prd$.
\item (Equal-slices to equal-slices.) \label{eqn:distrs-eqs-eqs} If $\distra^m = \eqs{k}^m$ and $\distrb^m = \eqs{\ell}^m$ for $\ell \leq k$, the bound is $4k \cdot r/\sqrt{n}$.
\end{enumerate}
\end{lemma}
Although Lemma~\ref{lem:distributions} involves the equal-slices distribution, one can convert to equal-nondegenerate-slices if desired using Proposition~\ref{prop:degen}.
Since $\eqs{k}^n$ is a mixture of product distributions (Proposition~\ref{prop:eqs-mix}), the main work in proving Lemma~\ref{lem:distributions} involves comparing product distributions.
\subsection{Comparing product distributions}
\begin{definition} For $\distra$ and $\distrb$ probability distributions on $\Omega^n$, the \emph{$\chi^2$ distance} $\dchi{\distra}{\distrb}$ is defined by
\[
\dchi{\distra}{\distrb} = \sqrt{\Varx_{x \sim \distra}\left[\frac{\distrb[x]}{\distra[x]}\right]}.
\]
Note that $\dchi{\distra}{\distrb}$ is \emph{not} symmetric in $\distra$ and $\distrb$.
\end{definition}
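For example (a quick illustration of the definition and of its asymmetry): if $\prd$ is the uniform distribution on $[k]$ and $\distrb$ puts all its mass on a single point, then under $x \sim \prd$ the ratio $\distrb[x]/\prd[x]$ equals $k$ with probability $1/k$ and $0$ otherwise, whereas under $x \sim \distrb$ the ratio $\prd[x]/\distrb[x]$ equals $1/k$ with probability $1$; hence
\[
\dchi{\prd}{\distrb} = \sqrt{k-1} \qquad\text{but}\qquad \dchi{\distrb}{\prd} = 0.
\]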
The $\chi^2$ distance is introduced to help us prove the following fact:
\begin{proposition} \label{prop:mix-distance} Let $\prd$ be a distribution on $\Omega$ with full support; i.e., $\min(\prd) \neq 0$. Suppose $\prd$ is slightly mixed with $\distrb$, forming $\wh{\prd}$; specifically, $\wh{\prd} = (1-\ppn) \prd + \ppn \distrb$. Then the associated product distributions $\prd^{\otimes n}$, $\wh{\prd}^{\otimes n}$ on $\Omega^{n}$ satisfy
\[
\dtv{\prd^{\otimes n}}{\wh{\prd}^{\otimes n}} \leq \dchi{\prd}{\distrb} \cdot \ppn \sqrt{n}.
\]
\end{proposition}
\begin{proof} It is a straightforward consequence of Cauchy-Schwarz (see, e.g.~\cite[p.\ 101]{Rei89})\noteryan{This is the part using $\min(\prd) \neq 0$, by the way.} that
\[
\dtv{\prd^{\otimes n}}{\wh{\prd}^{\otimes n}} \leq \dchi{\prd}{\wh{\prd}} \cdot \sqrt{n},
\]
and the identity $\dchi{\prd}{\wh{\prd}} = \ppn \cdot \dchi{\prd}{\distrb}$ follows easily from the definitions (for each $x$ we have $\wh{\prd}[x]/\prd[x] = (1-\ppn) + \ppn \cdot \distrb[x]/\prd[x]$, so the two variances in question differ by a factor of $\ppn^2$).
\end{proof}
This can be bounded independently of $\distrb$, as follows:
\begin{corollary} \label{cor:mix-distance} In the setting of Proposition~\ref{prop:mix-distance},
\[
\dtv{\prd^{\otimes n}}{\wh{\prd}^{\otimes n}} \leq \sqrt{{\textstyle \frac{1}{\min(\prd)}} - 1} \cdot \ppn \sqrt{n}.
\]
\end{corollary}
\begin{proof} It is easy to check that the distribution $\distrb$ maximizing $\dchi{\prd}{\distrb}$ is the one putting all its mass on the $x$ minimizing $\prd[x]$. In this case one calculates $\dchi{\prd}{\distrb} = \sqrt{\frac{1}{\min(\prd)} - 1}$.
\end{proof}
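In more detail (generalizing the point-mass example above), the calculation behind Corollary~\ref{cor:mix-distance} is as follows: if $\distrb$ puts all its mass on a point $x_0$ with $\prd[x_0] = \min(\prd)$, then under $x \sim \prd$ the ratio $\distrb[x]/\prd[x]$ equals $\frac{1}{\min(\prd)}$ with probability $\min(\prd)$ and $0$ otherwise. Its mean is $1$ and its second moment is $\frac{1}{\min(\prd)}$, so
\[
\Varx_{x \sim \prd}\left[\frac{\distrb[x]}{\prd[x]}\right] = \frac{1}{\min(\prd)} - 1,
\]
as claimed.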
\subsection{Proof of Lemma~\ref{lem:distributions}}
\begin{definition} \label{def:compos-distr} Let $0 \leq \ppn \leq 1$ and let $(\distra^m)$, $(\distrb^m)$ be distribution families. Drawing from the \emph{$(\ppn, \distra, \distrb)$-composite distribution} on $[k]^n$ entails the following: $J$ is taken to be a $\ppn$-random subset of~$[n]$; $x$ is drawn from $[k]^{\barJ}$ according to $\distra^{\abs{\barJ}}$; and, $y$ is drawn from $[k]^J$ according to $\distrb^{\abs{J}}$. We sometimes think of this distribution as just being a distribution on composite strings $z = (x, y) \in [k]^n$.
\end{definition}
Note that the distribution described in Lemma~\ref{lem:distributions} is very similar to the $(\ppn, \distra, \distrb)$-composite distribution, except that it uses an $[r, 4r]$-random subset rather than a $\ppn$-random subset. We can account for this difference with a standard Chernoff (large-deviation) bound:\noteryan{Citation needed?} \begin{fact} \label{fact:dev} If $J$ is a $\ppn$-random subset of $[n]$ with $\ppn = 2r/n$ as in Definition~\ref{def:r4r}, then $r \leq \abs{J} \leq 4r$ holds except with probability at most $2\exp(-r/4)$. \end{fact}
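For instance, the standard multiplicative Chernoff bounds $\Pr[X \leq (1-\delta)\mu] \leq \exp(-\delta^2 \mu/2)$ and $\Pr[X \geq (1+\delta)\mu] \leq \exp(-\delta^2 \mu/3)$ (valid for $0 < \delta \leq 1$ when $X$ is a sum of independent indicator random variables with mean $\mu$) suffice here: $\abs{J}$ is such a sum with mean $\mu = \ppn n = 2r$, so taking $\delta = 1/2$ in the first bound and $\delta = 1$ in the second gives
\[
\Pr[\abs{J} < r] \leq \exp(-r/4), \qquad \Pr[\abs{J} > 4r] \leq \exp(-2r/3) \leq \exp(-r/4),
\]
and a union bound over the two tails gives the stated failure probability of $2\exp(-r/4)$.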
The utility of using $\ppn$-random subsets in Definition~\ref{def:compos-distr} is the following observation: \begin{fact} If $\prd$ and $\distrb$ are distributions on $[k]$, thought of also as product distribution families, then the $(\ppn, \prd, \distrb)$-composite distribution on $[k]^n$ is precisely the product distribution $\wh{\prd}^{\otimes n}$, where $\wh{\prd}$ is the mixture distribution $(1-\ppn)\prd + \ppn \distrb$ on $[k]$. \end{fact}
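To see this, note that the coordinates of the composite string are mutually independent: each $i \in [n]$ lands in $J$ independently with probability $\ppn$, and the product distributions then fill in the coordinates of $x$ and $y$ independently. Hence the probability of any fixed $z \in [k]^n$ is
\[
\sum_{J \subseteq [n]} \; \prod_{i \in J} \ppn\,\distrb[z_i] \prod_{i \notin J} (1-\ppn)\,\prd[z_i] \;=\; \prod_{i=1}^n \bigl((1-\ppn)\,\prd[z_i] + \ppn\,\distrb[z_i]\bigr) \;=\; \wh{\prd}^{\otimes n}[z].
\]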
Because of this, we can use Corollary~\ref{cor:mix-distance} to bound the total variation distance between $\prd^{\otimes n}$ and a composite distribution. We conclude: \begin{proposition} \label{prop:prod-composite} Let $\prd$ and $\distrb$ be any distributions on $[k]$, thought of also as product distribution families. Writing $\wt{\prd}$ for the $(\ppn,\prd,\distrb)$-composite distribution on strings in $[k]^n$, we have \[ \dtv{\prd^{\otimes n}}{\wt{\prd}} \leq {\textstyle \sqrt{\frac{1}{\min(\prd)}-1}} \cdot \ppn \sqrt{n}. \] \end{proposition}
Recall that for any $\ell \leq k$, the equal-slices distribution $\eqs{\ell}^{m}$ on $m$ coordinates is a mixture of product distributions $\spac^{\otimes m}$ on $[k]^m$. We can therefore average Proposition~\ref{prop:prod-composite} over $\distrb$ to obtain: \begin{proposition} \label{prop:prod-eqs} If $\wt{\pi}$ denotes the $(\ppn,\pi,\eqs{\ell})$-composite distribution on strings in $[k]^n$, where $\ell \leq k$, then we have \[ \dtv{\pi^{\otimes n}}{\wt{\pi}} \leq {\textstyle \sqrt{\frac{1}{\min(\pi)}-1}} \cdot \ppn \sqrt{n}. \] \end{proposition} Here we have used the following basic bound, based on the triangle inequality: \begin{fact} \label{fact:tv-mix} Let $(\distrb_\kappa)_{\kappa \in K}$ be a family of distributions on $\Omega^n$, let $\varsigma$ be a distribution on $K$, and let $\overline{\distrb}$ denote the associated mixture distribution, given by drawing $\kappa \sim \varsigma$ and then drawing from $\distrb_\kappa$. Then \[ \dtv{\distra}{\overline{\distrb}} \leq \Ex_{\kappa \sim \varsigma}[\dtv{\distra}{\distrb_\kappa}]. \] \end{fact}
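This fact is immediate once total variation distance is written as a norm of the difference of the two distributions (half the $\ell^1$ norm, under the usual convention): writing $\|\cdot\|$ for that norm,
\[
\dtv{\distra}{\overline{\distrb}} = \bigl\| \distra - \Ex_{\kappa \sim \varsigma}[\distrb_\kappa] \bigr\| = \bigl\| \Ex_{\kappa \sim \varsigma}[\distra - \distrb_\kappa] \bigr\| \leq \Ex_{\kappa \sim \varsigma}\bigl\| \distra - \distrb_\kappa \bigr\| = \Ex_{\kappa \sim \varsigma}[\dtv{\distra}{\distrb_\kappa}].
\]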
If we instead use this fact to average Proposition~\ref{prop:prod-composite} over $\prd$, we can obtain: \begin{proposition} \label{prop:eqs-prod} Let $\distrb$ be any distribution on $[k]$. Writing $\distra$ for the $(\ppn, \eqs{k}, \distrb)$-composite distribution on strings in $[k]^n$, we have \[ \dtv{\eqs{k}^n}{\distra} \leq (2k-1)\ppn \sqrt{n}. \] \end{proposition} \begin{proof} Thinking of $\eqs{k}^m$ as the mixture of product distributions $\spac^{\otimes m}$, where $\spac$ is a random spacing on $[k]$, Fact~\ref{fact:tv-mix} and Proposition~\ref{prop:prod-composite} imply \[ \dtv{\eqs{k}^n}{\distra} \leq \Ex_{\spac}\left[{\textstyle \sqrt{\frac{1}{\min(\spac)}-1}}\right] \cdot \ppn \sqrt{n}. \] We can upper-bound the expectation\noteryan{Undoubtedly someone has worked hard on this $-1/2$th moment of the least spacing before (Devroye '81 or '86 perhaps), but I think it's probably okay to do the following simple thing here} by \begin{multline*} \Ex_{\spac}\left[{\textstyle \sqrt{\frac{1}{\min(\spac)}}}\right] \quad=\quad \int_{0}^\infty \Pr_{\spac}\left[{\textstyle \sqrt{\frac{1}{\min(\spac)}}} \geq t\right]\,dt \quad=\quad \int_{0}^\infty \Pr_{\spac}[\min(\spac) \leq 1/t^2]\,dt \\ \leq\quad k + \int_{k}^\infty \Pr_{\spac}[\min(\spac) \leq 1/t^2]\,dt \quad\leq\quad k + \int_{k}^\infty (k(k-1)/t^2) \,dt \quad=\quad 2k-1, \end{multline*} where in the second-to-last step we used Proposition~\ref{prop:rand-min}. \end{proof} Averaging now once more in the second component, we obtain the following: \begin{proposition} \label{prop:eqs-eqs} Let $2 \leq \ell \leq k$ and let $\distra'$ denote the $(\ppn, \eqs{k}, \eqs{\ell})$-composite distribution on strings in $[k]^n$. Then \[ \dtv{\eqs{k}^n}{\distra'} \leq (2k-1) \ppn \sqrt{n}. \] \end{proposition}
We can now obtain the proof of Lemma~\ref{lem:distributions}:
\begin{proof} The three statements in Lemma~\ref{lem:distributions} essentially follow from Propositions~\ref{prop:prod-eqs}, \ref{prop:eqs-prod}, and \ref{prop:eqs-eqs}, taking $\ppn = 2r/n$. This would give bounds of $2{\textstyle \sqrt{\frac{1}{\min(\prd)}-1}} \cdot r / \sqrt{n}$, $(4k-2) \cdot r/\sqrt{n}$, and $(4k-2) \cdot r/\sqrt{n}$, respectively. However, we need to account for conditioning on $r \leq \abs{J} \leq 4r$. By Fact~\ref{fact:dev}, this conditioning increases the total variation distance by at most $2\exp(-r/4)$. Using the lower bound $r \geq 2 \ln n$ from the lemma's hypothesis, we have $2\exp(-r/4) \leq 2\exp(-(\ln n)/2) = 2/\sqrt{n} \leq 2r/\sqrt{n}$; adding this to the bounds above gives the bounds claimed in the lemma, completing the proof. \end{proof}