%% -*- mode: latex; mode: reftex; mode: flyspell; coding: utf-8; tex-command: "pdflatex.sh" -*-

%% Any copyright is dedicated to the Public Domain.
%% https://creativecommons.org/publicdomain/zero/1.0/
%% Written by Francois Fleuret <francois@fleuret.org>

\documentclass[11pt,a4paper,oneside]{article}
\usepackage[paperheight=15cm,paperwidth=8cm,top=2mm,bottom=15mm,right=2mm,left=2mm]{geometry}
%\usepackage[a4paper,top=2.5cm,bottom=2cm,left=2.5cm,right=2.5cm]{geometry}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{amsmath,amssymb,dsfont}
\usepackage[pdftex]{graphicx}
\usepackage[colorlinks=true,linkcolor=blue,urlcolor=blue,citecolor=blue]{hyperref}
\usepackage{tikz}
\usetikzlibrary{arrows,arrows.meta,calc}
\usetikzlibrary{patterns,backgrounds}
\usetikzlibrary{positioning,fit}
\usetikzlibrary{shapes.geometric,shapes.multipart}
\usetikzlibrary{patterns.meta,decorations.pathreplacing,calligraphy}
\usetikzlibrary{tikzmark}
\usetikzlibrary{decorations.pathmorphing}
\usepackage[round]{natbib}
\usepackage[osf]{libertine}
\usepackage{microtype}
\usepackage{fancyvrb}

\usepackage{mleftright}

\newcommand{\setmuskip}[2]{#1=#2\relax}
\setmuskip{\thinmuskip}{1.5mu} % by default it is equal to 3 mu
\setmuskip{\medmuskip}{2mu} % by default it is equal to 4 mu
\setmuskip{\thickmuskip}{3.5mu} % by default it is equal to 5 mu

\setlength{\parindent}{0cm}
\setlength{\parskip}{1ex}
%\renewcommand{\baselinestretch}{1.3}
%\setlength{\tabcolsep}{0pt}
%\renewcommand{\arraystretch}{1.0}

\def\argmax{\operatornamewithlimits{argmax}}
\def\argmin{\operatornamewithlimits{argmin}}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\def\given{\,\middle\vert\,}
\def\proba{\operatorname{P}}
\newcommand{\seq}{{S}}
\newcommand{\expect}{\mathds{E}}
\newcommand{\variance}{\mathds{V}}
\newcommand{\empexpect}{\hat{\mathds{E}}}
\newcommand{\mutinf}{\mathds{I}}
\newcommand{\empmutinf}{\hat{\mathds{I}}}
\newcommand{\entropy}{\mathds{H}}
\newcommand{\empentropy}{\hat{\mathds{H}}}
\newcommand{\ganG}{\mathbf{G}}
\newcommand{\ganD}{\mathbf{D}}
\newcommand{\ganF}{\mathbf{F}}

\newcommand{\dkl}{\mathds{D}_{\mathsf{KL}}}
\newcommand{\djs}{\mathds{D}_{\mathsf{JS}}}

\allowdisplaybreaks[2]

\newcommand*{\vertbar}{\rule[-1ex]{0.5pt}{2.5ex}}
\newcommand*{\horzbar}{\rule[.5ex]{2.5ex}{0.5pt}}

\def\positionalencoding{\operatorname{pos-enc}}
\def\concat{\operatorname{concat}}
\def\crossentropy{\LL_{\operatorname{ce}}}

\begin{document}

\vspace*{0ex}

\begin{center}
{\Large The Evidence Lower Bound}

Fran\c cois Fleuret

\today

\vspace*{1ex}

\end{center}

Given a training set $x_1, \dots, x_N$ that follows an unknown
distribution $\mu_X$, we want to fit a model $p_\theta(x,z)$ to it,
maximizing
%
\[
\sum_n \log \, p_\theta(x_n).
\]
%
If we do not have an analytical form of the marginal $p_\theta(x_n)$
but only the expression of $p_\theta(x_n,z)$, we can get an estimate
of the marginal by sampling $z$ from any distribution $q$ that is
non-zero wherever $p_\theta(x_n, \cdot)$ is
%
\begin{align*}
p_\theta(x_n) & = \int_z p_\theta(x_n,z) dz                   \\
              & = \int_z \frac{p_\theta(x_n,z)}{q(z)} q(z) dz \\
              & = \expect_{Z \sim q(z)} \left[\frac{p_\theta(x_n,Z)}{q(Z)}\right].
\end{align*}
%
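For instance, drawing $K$ i.i.d.\ samples $z_1, \dots, z_K \sim q$
gives the unbiased Monte Carlo estimate
%
\[
p_\theta(x_n) \approx \frac{1}{K} \sum_{k=1}^{K} \frac{p_\theta(x_n,z_k)}{q(z_k)}.
\]
%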
So if we wanted to maximize $p_\theta(x_n)$ alone, we could sample a
$Z$ with $q$ and maximize
%
\begin{equation*}
\frac{p_\theta(x_n,Z)}{q(Z)}.
\end{equation*}

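As a sanity check, here is a minimal numerical sketch of this
estimator (not part of the original note), assuming a toy joint in
which $z \sim \mathcal{N}(0,1)$ and $x \mid z \sim \mathcal{N}(z,1)$,
so that the exact marginal is $\mathcal{N}(0,2)$:
%
\begin{Verbatim}[fontsize=\small]
import numpy as np
from scipy.stats import norm

# Toy joint: z ~ N(0,1), x|z ~ N(z,1),
# hence the marginal is x ~ N(0,2).
x, K = 1.5, 100000
z = np.random.randn(K)  # q(z) = N(0,1)
w = norm.pdf(x, z, 1.0) * norm.pdf(z) \
    / norm.pdf(z)       # p(x,z)/q(z)
print(w.mean())         # MC estimate
print(norm.pdf(x, 0.0, 2.0 ** 0.5))
\end{Verbatim}
%
Here $q$ is taken equal to the prior, so the ratio simplifies to
$p_\theta(x_n \mid z)$; the general form is kept to match the
derivation above.
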
But we want to maximize $\sum_n \log \, p_\theta(x_n)$. If we use the
$\log$ of the previous expression, we can decompose its average value
as
%
\begin{align*}
 & \expect_{Z \sim q(z)} \left[ \log \frac{p_\theta(x_n,Z)}{q(Z)} \right]                                \\
 & = \expect_{Z \sim q(z)} \left[ \log \frac{p_\theta(Z \mid x_n) \, p_\theta(x_n)}{q(Z)} \right]        \\
 & = \expect_{Z \sim q(z)} \left[ \log \frac{p_\theta(Z \mid x_n)}{q(Z)} \right] + \log \, p_\theta(x_n) \\
 & = - \dkl(q(z) \, \| \, p_\theta(z \mid x_n)) + \log \, p_\theta(x_n).
\end{align*}
%
Hence, on average, this maximizes not $\log \, p_\theta(x_n)$ itself
but a \emph{lower bound} of it, the \emph{Evidence Lower Bound}
(ELBO), since the KL divergence is non-negative. And since the
maximization pushes that KL term down, it also aligns
$p_\theta(z \mid x_n)$ and $q(z)$, so we may get a worse
$p_\theta(x_n)$ in exchange for bringing $p_\theta(z \mid x_n)$
closer to $q(z)$.

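The same quantity can also be decomposed with
$p_\theta(x_n,z) = p_\theta(x_n \mid z) \, p_\theta(z)$, which gives
the equivalent form
%
\[
\expect_{Z \sim q(z)} \left[ \log \, p_\theta(x_n \mid Z) \right] - \dkl(q(z) \, \| \, p_\theta(z)),
\]
%
that is, an expected reconstruction term minus a divergence from the
prior $p_\theta(z)$.
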
However, all this analysis is still valid if $q$ is a parameterized
distribution $q_\alpha(z \mid x_n)$ conditioned on $x_n$. In that
case, if we optimize $\theta$ and $\alpha$ to maximize
%
\[
\expect_{Z \sim q_\alpha(z \mid x_n)} \left[ \log \frac{p_\theta(x_n,Z)}{q_\alpha(Z \mid x_n)} \right],
\]
%
it both maximizes $\log \, p_\theta(x_n)$ and brings $q_\alpha(z \mid
x_n)$ close to $p_\theta(z \mid x_n)$.
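
As an illustration, here is a minimal sketch (not from the original
note) of maximizing a single-sample estimate of this quantity by
stochastic gradient ascent, for the toy joint used above, with
$\theta$ kept fixed and $q_\alpha(z \mid x) = \mathcal{N}(m, s^2)$;
the exact posterior it should recover is $\mathcal{N}(x/2, 1/2)$:
%
\begin{Verbatim}[fontsize=\small]
import torch
from torch.distributions import Normal

x = torch.tensor(1.5)
# q_alpha(z|x) = N(m, exp(t)^2)
m = torch.zeros((), requires_grad=True)
t = torch.zeros((), requires_grad=True)
opt = torch.optim.Adam([m, t], lr=1e-2)

for _ in range(5000):
    q = Normal(m, t.exp())
    z = q.rsample()  # reparameterized
    # log p(x,z) of the toy joint
    lp = Normal(0.0, 1.0).log_prob(z) \
         + Normal(z, 1.0).log_prob(x)
    loss = -(lp - q.log_prob(z))  # -ELBO
    opt.zero_grad()
    loss.backward()
    opt.step()

# should approach x/2 = 0.75 and
# sqrt(1/2) ~ 0.707, up to sampling noise
print(m.item(), t.exp().item())
\end{Verbatim}
%
The \texttt{rsample} call draws $Z$ as $m + \epsilon \, e^t$ with
$\epsilon \sim \mathcal{N}(0,1)$, so the gradient flows through the
sample to $\alpha = (m, t)$.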

\end{document}